diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 00000000..cf7fa5b6 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,70 @@ +// See https://aka.ms/vscode-remote/devcontainer.json for format details. +{ + "name": "Demo Elevation Mapping CUPY Workspace", + "dockerFile": "../docker/Dockerfile.x64", + "context": "../", + "remoteUser": "ros", + "runArgs": [ + "--network=host", + "--cap-add=SYS_PTRACE", + "--cap-add=SYS_NICE", + "--security-opt=seccomp:unconfined", + "--security-opt=apparmor:unconfined", + // "--volume=/tmp/.X11-unix:/tmp/.X11-unix", + "--volume=/mnt/wslg:/mnt/wslg", + "--ipc=host", + "--pid=host", + "--runtime=nvidia", + "--gpus=all", + "--privileged", + "--ulimit=rtprio=98", + "--ulimit=rttime=-1", + "--ulimit=memlock=8428281856", + "--name=emcupy-ros2-devcontainer" + ], + "containerEnv": { + "DISPLAY": "${localEnv:DISPLAY}", // Needed for GUI try ":0" for windows + "WAYLAND_DISPLAY": "${localEnv:WAYLAND_DISPLAY}", + "XDG_RUNTIME_DIR": "${localEnv:XDG_RUNTIME_DIR}", + "PULSE_SERVER": "${localEnv:PULSE_SERVER}", + "LIBGL_ALWAYS_SOFTWARE": "1" // Needed for software rendering of opengl + }, + "mounts": [ + "source=/dev,target=/dev,type=bind,consistency=cached" + // "source=~/bag_files,target=/workspaces/vscode-container-workspace/bag_files,type=bind,consistency=cached" + ], + "customizations": { + "vscode": { + "settings": { + "remote.autoForwardPorts": false, + "remote.autoForwardPortsSource": "output", + "otherPortsAttributes": { "onAutoForward" : "ignore" } + }, + "extensions": [ + "althack.ament-task-provider", + "betwo.b2-catkin-tools", + "DotJoshJohnson.xml", + "ms-azuretools.vscode-docker", + "ms-vscode.cmake-tools", + "ms-python.python", + "ms-vscode.cpptools", + "redhat.vscode-yaml", + "smilerobotics.urdf", + "streetsidesoftware.code-spell-checker", + "twxs.cmake", + "yzhang.markdown-all-in-one", + "zachflower.uncrustify", + "mhutchie.git-graph", + "eamodio.gitlens", + "ms-vscode.cpptools-extension-pack", + "usernamehw.errorlens", + "ms-iot.vscode-ros", + "alefragnani.Bookmarks", + "ms-vscode.live-server" + + ] + } + }, + "workspaceMount": "source=${localWorkspaceFolder}/,target=/home/ros/workspace/src/elevation_mapping_cupy/,type=bind", + "workspaceFolder": "/home/ros/workspace/src/elevation_mapping_cupy/" +} \ No newline at end of file diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml new file mode 100644 index 00000000..2e91434d --- /dev/null +++ b/.github/workflows/documentation.yml @@ -0,0 +1,23 @@ +name: Docs +on: [ push, pull_request, workflow_dispatch ] +jobs: + docs: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-python@v2 + - name: Install dependencies + run: | + pip install sphinx sphinx_rtd_theme sphinx-copybutton + pip install -r docs/requirements.txt + - name: Sphinx build + run: | + sphinx-build docs/source docs/_build + - name: Deploy + uses: peaceiris/actions-gh-pages@v3 + if: ${{ github.event_name == 'push' }} + with: + publish_branch: gh-pages + github_token: ${{ secrets.GITHUB_TOKEN }} + publish_dir: docs/_build/ + force_orphan: true \ No newline at end of file diff --git a/.github/workflows/python-tests.yml b/.github/workflows/python-tests.yml new file mode 100644 index 00000000..0f341544 --- /dev/null +++ b/.github/workflows/python-tests.yml @@ -0,0 +1,40 @@ +# This workflow will install Python dependencies, run tests and lint with a single version of Python + +name: Python testing + +on: + push: + 
branches: [ "feature/2_semantic_python", "feature/**","dev/**" ] +# pull_request: +# branches: [ "main" ] + +permissions: + contents: read + +jobs: + build: + + # runs-on: ubuntu-latest + runs-on: [ self-hosted, Linux, X64 ] + + steps: + - uses: actions/checkout@v3 + - name: Set up Python 3.8 + uses: actions/setup-python@v3 + with: + python-version: "3.8" + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install pytest cupy-cuda11x + python -m pip install 'git+https://github.com/facebookresearch/detectron2.git' + pip install torch torchvision torchaudio + if [ -f requirements.txt ]; then pip install -r requirements.txt; fi + - name: Test elevation mapping with pytest + run: | + cd elevation_mapping_cupy/script/elevation_mapping_cupy/tests/ + pytest + - name: Test semantic_sensor with pytest + run: | + cd sensor_processing/semantic_sensor/script/semantic_sensor/tests + pytest test_utils.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..41fde99f --- /dev/null +++ b/.gitignore @@ -0,0 +1,12 @@ +*.pyc +*.bkp +*.orig +.idea* +.pytest_cache +.run/ +docs/build +_build +.idea* +.vscode* +*.egg-info +elevation_mapping_cupy/compile_commands.json diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..584d0254 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 ETH Zurich, Takahiro Miki + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/README.md b/README.md new file mode 100644 index 00000000..27cb1a6c --- /dev/null +++ b/README.md @@ -0,0 +1,151 @@ +# ROS2 Elevation Mapping Cupy + +![python tests](https://github.com/leggedrobotics/elevation_mapping_cupy/actions/workflows/python-tests.yml/badge.svg) + +[Documentation](https://leggedrobotics.github.io/elevation_mapping_cupy/) + +## Overview + +The Elevaton Mapping CuPy software package represents an advancement in robotic navigation and locomotion. +Integrating with the Robot Operating System (ROS) and utilizing GPU acceleration, this framework enhances point cloud registration and ray casting, +crucial for efficient and accurate robotic movement, particularly in legged robots. +![screenshot](docs/media/main_repo.png) +![screenshot](docs/media/main_mem.png) +![gif](docs/media/convex_approximation.gif) + +## Key Features + +- **Height Drift Compensation**: Tackles state estimation drifts that can create mapping artifacts, ensuring more accurate terrain representation. 
+ +- **Visibility Cleanup and Artifact Removal**: Raycasting methods and an exclusion zone feature are designed to remove virtual artifacts and correctly interpret overhanging obstacles, preventing misidentification as walls. + +- **Learning-based Traversability Filter**: Assesses terrain traversability using local geometry, improving path planning and navigation. + +- **Versatile Locomotion Tools**: Incorporates smoothing filters and plane segmentation, optimizing movement across various terrains. + +- **Multi-Modal Elevation Map (MEM) Framework**: Allows seamless integration of diverse data like geometry, semantics, and RGB information, enhancing multi-modal robotic perception. + +- **GPU-Enhanced Efficiency**: Facilitates rapid processing of large data structures, crucial for real-time applications. + +## Overview + +![Overview of multi-modal elevation map structure](docs/media/overview.png) + +Overview of our multi-modal elevation map structure. The framework takes multi-modal images (purple) and multi-modal (blue) point clouds as +input. This data is input into the elevation map by first associating it to the cells and then fusing it with different fusion algorithms into the various +layers of the map. Finally, the map can be post-processed with various custom plugins to generate new layers (e.g. traversability) or to process layers for +external components (e.g. line detection). + +## Citing + +If you use the Elevation Mapping CuPy, please cite the following paper: + +[Elevation Mapping for Locomotion and Navigation using GPU](https://arxiv.org/abs/2204.12876) + +Takahiro Miki, Lorenz Wellhausen, Ruben Grandia, Fabian Jenelten, Timon Homberger, Marco Hutter + +```bibtex +@inproceedings{miki2022elevation, + title={Elevation mapping for locomotion and navigation using gpu}, + author={Miki, Takahiro and Wellhausen, Lorenz and Grandia, Ruben and Jenelten, Fabian and Homberger, Timon and Hutter, Marco}, + booktitle={2022 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)}, + pages={2273--2280}, + year={2022}, + organization={IEEE} +} +``` + +If you use the Multi-modal Elevation Mapping for color or semantic layers, please cite the following paper: + +[MEM: Multi-Modal Elevation Mapping for Robotics and Learning](https://arxiv.org/abs/2309.16818v1) + +Gian Erni, Jonas Frey, Takahiro Miki, Matias Mattamala, Marco Hutter + +```bibtex +@inproceedings{erni2023mem, + title={MEM: Multi-Modal Elevation Mapping for Robotics and Learning}, + author={Erni, Gian and Frey, Jonas and Miki, Takahiro and Mattamala, Matias and Hutter, Marco}, + booktitle={2023 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)}, + pages={11011--11018}, + year={2023}, + organization={IEEE} +} +``` + +## Quick instructions to run + +### Installation + +First, clone the repository into your catkin_ws: + +```zsh +mkdir -p catkin_ws/src +cd catkin_ws/src +git clone https://github.com/leggedrobotics/elevation_mapping_cupy.git +``` + +Then install the dependencies. +You can also use docker, which already installs all dependencies. +When you run the script, it should pull the image. + +```zsh +cd docker +./run.sh +``` + +You can also build locally by running `build.sh`, but in this case change `IMAGE_NAME` in `run.sh` to `elevation_mapping_cupy:latest`. + +For more information, check the [documentation](https://leggedrobotics.github.io/elevation_mapping_cupy/getting_started/installation.html). + +### Build package + +Inside the docker container:
+ +```zsh +cd $HOME/catkin_ws +catkin build elevation_mapping_cupy +catkin build convex_plane_decomposition_ros # If you want to use plane segmentation +catkin build semantic_sensor # If you want to use semantic sensors +``` + +### Run turtlebot example + +![Elevation map examples](docs/media/turtlebot.png) + +```bash +export TURTLEBOT3_MODEL=waffle +roslaunch elevation_mapping_cupy turtlesim_simple_example.launch +``` + +For fusing semantics into the map such as rgb from a multi modal pointcloud: + +```bash +export TURTLEBOT3_MODEL=waffle +roslaunch elevation_mapping_cupy turtlesim_semantic_pointcloud_example.launch +``` + +For fusing semantics into the map such as rgb semantics or features from an image: + +```bash +export TURTLEBOT3_MODEL=waffle +roslaunch elevation_mapping_cupy turtlesim_semantic_image_example.launch +``` + +For plane segmentation: + +```bash +catkin build convex_plane_decomposition_ros +export TURTLEBOT3_MODEL=waffle +roslaunch elevation_mapping_cupy turtlesim_plane_decomposition_example.launch +``` + +To control the robot with a keyboard, a new terminal window needs to be opened. +Then run + +```bash +export TURTLEBOT3_MODEL=waffle +roslaunch turtlebot3_teleop turtlebot3_teleop_key.launch +``` + +Velocity inputs can be sent to the robot by pressing the keys `a`, `w`, `d`, `x`. To stop the robot completely, press `s`. diff --git a/TODO.md b/TODO.md new file mode 100644 index 00000000..89df8dc9 --- /dev/null +++ b/TODO.md @@ -0,0 +1,11 @@ +- [x] Compile "elevation_mapping_msgs" with ROS2 +- [x] Compile "elevation_mapping_cupy" with ROS2 +- [x] Compile "semantic_sensor" with ROS2 +- [x] Compile "convex_plane_decomposition_msgs" with ROS2 +- [ ] Compile "convex_plane_decomposition" with ROS2 +- [x] Compile "grid_map_filters_rsl" with ROS2 +- [x] Compile "cgal5_ament" with ROS2 + +- [x] Run "elevation_mapping_cupy" with ROS2 +- [ ] Run "semantic_sensor" with ROS2 +- [ ] Run "convex_plane_decomposition" with ROS2 diff --git a/docker/Dockerfile.x64 b/docker/Dockerfile.x64 new file mode 100644 index 00000000..e0fa08c9 --- /dev/null +++ b/docker/Dockerfile.x64 @@ -0,0 +1,253 @@ +# syntax=docker/dockerfile:1.4 + +# Base image +FROM nvidia/cuda:12.1.1-cudnn8-devel-ubuntu22.04 AS base + +# Metadata +LABEL description="ROS2 environment with CUDA support" +LABEL version="1.0" + +# Build arguments +ARG ROS_DISTRO=humble +ARG USERNAME=ros +ARG USER_UID=1000 +ARG USER_GID=$USER_UID +ARG RMW_NAME=zenoh +ARG INSTALL_EMCUPY_ROSDEPS=true + +# Environment variables +ENV DEBIAN_FRONTEND=noninteractive \ + LANG=en_US.UTF-8 \ + LC_ALL=${LANG}\ + TZ=UTC \ + PYTHONUNBUFFERED=1 \ + ROS_DISTRO=${ROS_DISTRO} \ + ROS_ROOT=/opt/ros/${ROS_DISTRO} \ + AMENT_PREFIX_PATH=/opt/ros/${ROS_DISTRO} \ + COLCON_PREFIX_PATH=/opt/ros/${ROS_DISTRO} \ + LD_LIBRARY_PATH=/opt/ros/${ROS_DISTRO}/lib:/usr/local/cuda/lib64 \ + PATH=/opt/ros/${ROS_DISTRO}/bin:/usr/src/tensorrt/bin:/usr/local/cuda/bin:$PATH \ + PYTHONPATH=/opt/ros/${ROS_DISTRO}/lib/python3.10/site-packages \ + # Used by various ROS2 packages + RMW_IMPLEMENTATION=rmw_${RMW_NAME}_cpp \ + # Should be the same as above but with dashes instead of underscores + RMW_IMPLEMENTATION_DASH=rmw-${RMW_NAME}-cpp + +# Install basic utilities and dependencies +RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ + apt update && apt install -y --no-install-recommends \ + locales \ + tzdata \ + curl \ + gnupg2 \ + lsb-release \ + sudo \ + software-properties-common \ + wget \ + git \ + git-lfs \ + nano \ + && locale-gen ${LANG} \ + && update-locale 
LC_ALL=${LC_ALL} LANG=${LANG}\ + && ln -fs /usr/share/zoneinfo/${TZ} /etc/localtime \ + && dpkg-reconfigure -f ${DEBIAN_FRONTEND} tzdata \ + && apt clean \ + && rm -rf /var/lib/apt/lists/* + +# Install ROS2 +RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ + curl -sSL https://raw.githubusercontent.com/ros/rosdistro/master/ros.key -o /usr/share/keyrings/ros-archive-keyring.gpg \ + && echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/ros-archive-keyring.gpg] http://packages.ros.org/ros2/ubuntu $(. /etc/os-release && echo $UBUNTU_CODENAME) main" | tee /etc/apt/sources.list.d/ros2.list > /dev/null \ + && apt update && apt install -y --no-install-recommends \ + ros-${ROS_DISTRO}-ros-base \ + python3-argcomplete \ + ros-${ROS_DISTRO}-${RMW_IMPLEMENTATION_DASH} \ + && apt clean \ + && rm -rf /var/lib/apt/lists/* + +# Development stage +FROM base AS dev + +# Install development tools and dependencies +RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ + apt update && apt install -y --no-install-recommends \ + bash-completion \ + build-essential \ + cmake \ + gdb \ + openssh-client \ + python3-pip \ + vim \ + doxygen \ + graphviz \ + python3-sphinx \ + python3-breathe \ + ros-dev-tools \ + ros-${ROS_DISTRO}-ament-* \ + python3-rosdep \ + libxine2-dev \ + libtiff5-dev \ + libpostproc-dev \ + libopencv-dev \ + && apt clean \ + && rm -rf /var/lib/apt/lists/* + +# Pytorch +# Used by elevation_mapping_cupy and not rosdep resolvable for cuda version +RUN python3 -m pip install -U --extra-index-url https://download.pytorch.org/whl/cu121 \ + torch \ + torchvision \ + torchaudio + +# Install Python packages +RUN python3 -m pip install \ + # colcon extension to enable easier workspace cleaning + colcon-clean\ + # Almost all ros2 packages + rosdoc2\ + # sphinx is used across packages for documentation + sphinx_rtd_theme \ + sphinx-multiversion \ + sphinx-copybutton\ + sphinx-tabs\ + # For VS Code python code formatting I think? + autopep8\ + # Used by multiple packages for linting I think? + flake8-builtins\ + flake8-comprehensions\ + flake8-docstrings\ + flake8-import-order\ + flake8-class-newline\ + flake8-blind-except\ + flake8-quotes + # For Python code formatting. Not sure what package uses it + # black==21.12b0\ +#### For elevation_mapping_cupy #### +RUN if [ "$INSTALL_EMCUPY_ROSDEPS" = true ]; then \ + python3 -m pip install \ + # rosdep resolves with apt python-ruamel.yaml + # ruamel.yaml\ + # rosdep resolves with apt python3-sklearn - might not be needed + scikit-learn\ + # rosdep resolves with apt python3-shapely + # shapely\ + # rosdep resolves with apt python3-opencv + opencv-python\ + # rosdep resolves using extra rosdeps + cupy-cuda12x\ + # rosdep resolves using extra rosdeps + simple-parsing\ + # rosdep resolves using extra rosdeps + "numpy<2.0.0"; \ + fi + +# Needed for elevation_mapping_cupy to not have run-time errors +RUN python3 -m pip install transforms3d scipy -U + +# Set up non-root user +RUN groupadd --gid $USER_GID $USERNAME \ + && useradd -s /bin/bash --uid $USER_UID --gid $USER_GID -m $USERNAME \ + && echo $USERNAME ALL=\(root\) NOPASSWD:ALL > /etc/sudoers.d/$USERNAME \ + && chmod 0440 /etc/sudoers.d/$USERNAME + +# Set up autocompletion and source ROS environment for user +RUN echo "source /opt/ros/${ROS_DISTRO}/setup.bash" >> /home/$USERNAME/.bashrc \ + && echo "source /usr/share/colcon_argcomplete/hook/colcon-argcomplete.bash" >> /home/$USERNAME/.bashrc + +# For building GPU supported containers? 
+# Install NVIDIA Container Toolkit +RUN distribution=$(. /etc/os-release;echo $ID$VERSION_ID) \ + && curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add - \ + && curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list \ + && apt update && apt install -y nvidia-container-toolkit \ + && apt clean \ + && rm -rf /var/lib/apt/lists/* + + +RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ + if [ "$INSTALL_EMCUPY_ROSDEPS" = true ]; then \ + apt update && DEBIAN_FRONTEND=${DEBIAN_FRONTEND} apt install -y --no-install-recommends \ + # resolved by rosdep as boost + libboost-all-dev\ + ### For elevation_mapping_cupy #### + # rosdep grid_map_msgs + ros-${ROS_DISTRO}-grid-map-msgs\ + # rosdep grid_map_ros + ros-${ROS_DISTRO}-grid-map-ros\ + # rosdep image_transport + ros-${ROS_DISTRO}-image-transport\ + # rosdep pcl_ros + ros-${ROS_DISTRO}-pcl-ros\ + # rosdep cv_bridge + ros-${ROS_DISTRO}-cv-bridge\ + # rosdep tf-transformations + ros-${ROS_DISTRO}-tf-transformations\ + # rosdep rviz2 + ros-${ROS_DISTRO}-rviz2\ + # replacing with rosdep gazebo_ros_pkgs + ros-${ROS_DISTRO}-gazebo-ros\ + # rosdep grid_map_cv + ros-${ROS_DISTRO}-grid-map-cv\ + # rosdep grid_map_core + ros-${ROS_DISTRO}-grid-map-core\ + # rosdep grid_map_demos + ros-${ROS_DISTRO}-grid-map-demos\ + ros-${ROS_DISTRO}-point-cloud-transport\ + python3-shapely\ + python3-scipy\ + python3-ruamel.yaml\ + ################################# + ### For debugging elevation_mapping_cupy by setting up turtlebot3_simulations#### + # ros-${ROS_DISTRO}-turtlebot3*\ + ros-${ROS_DISTRO}-camera-calibration-parsers\ + ros-${ROS_DISTRO}-camera-info-manager\ + ros-${ROS_DISTRO}-gazebo-plugins\ + ros-${ROS_DISTRO}-turtlebot3-msgs\ + ros-${ROS_DISTRO}-turtlebot3-teleop\ + # replacing with rosdep gazebo_ros_pkgs + ros-${ROS_DISTRO}-gazebo-ros-pkgs\ + ############################################# + && apt clean && \ + rm -rf /var/lib/apt/lists/* ;\ + fi +# Development stage +FROM dev as dev2 +ARG WORKSPACE="/home/${USERNAME}/workspace" + +RUN mkdir -p ${WORKSPACE}/src/elevation_mapping_cupy && chown -R ${USER_UID}:${USER_GID} /home/${USERNAME} +RUN echo "if [ -f ${WORKSPACE}/install/setup.bash ]; then source ${WORKSPACE}/install/setup.bash; fi" >> /home/ros/.bashrc +WORKDIR ${WORKSPACE} + + +# Setup rosdep +RUN rosdep init +# COPY docker/extra_rosdeps.yaml /etc/ros/rosdep/sources.list.d/emcupy-rosdeps-fixed.yaml +# Setup rosdep with extra rosdeps and install them for elevation_mapping_cupy +# RUN --mount=type=cache,target=/home/${USERNAME}/.ros/rosdep/sources.cache \ +# RUN if [ "$INSTALL_EMCUPY_ROSDEPS" = true ]; then \ +# echo "yaml file:///etc/ros/rosdep/sources.list.d/emcupy-rosdeps-fixed.yaml" | tee /etc/ros/rosdep/sources.list.d/01-emcupy-rosdeps.list && \ +# rosdep update && \ +# # This install doesn't seem to actually install anything, so removing +# apt update && \ +# rosdep install --from-paths . 
--ignore-src -y -r; \ +# fi +# Now add the same file again, but this one is symlinked so will change if you change the file in your workspace +RUN ln -s ${WORKSPACE}/src/elevation_mapping_cupy/docker/extra_rosdeps.yaml /etc/ros/rosdep/sources.list.d/emcupy-rosdeps.yaml && \ + echo "yaml file:///etc/ros/rosdep/sources.list.d/emcupy-rosdeps.yaml" | tee /etc/ros/rosdep/sources.list.d/00-emcupy-rosdeps.list; + + +# Switch to non-root user +USER $USERNAME + +# Health check +HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \ + CMD ros2 topic list > /dev/null 2>&1 || exit 1 + +# Set ID to value not used by others on your network +ENV ROS_DOMAIN_ID=12 + +# For elevation_mapping_cupy +ENV TURTLEBOT3_MODEL=waffle_realsense_depth + +# Set the default command to bash +CMD ["bash"] \ No newline at end of file diff --git a/docker/build.sh b/docker/build.sh new file mode 100755 index 00000000..6d67ad53 --- /dev/null +++ b/docker/build.sh @@ -0,0 +1,21 @@ +#!/bin/bash +set -e +cd ~/workspace +# Set the default build type +source /opt/ros/$ROS_DISTRO/setup.bash +BUILD_TYPE=RelWithDebInfo #Debug, Release, RelWithDebInfo, MinSizeRel +colcon build \ + --continue-on-error \ + --parallel-workers $(nproc) \ + --merge-install \ + --symlink-install \ + --event-handlers console_cohesion+ \ + --base-paths src \ + --cmake-args \ + "-DCMAKE_BUILD_TYPE=$BUILD_TYPE" \ + "-DCMAKE_EXPORT_COMPILE_COMMANDS=On" \ + "-DBUILD_TESTING=OFF" \ + "-DCMAKE_CXX_FLAGS=-Wl,--allow-shlib-undefined -Wall -Wextra -Wpedantic -Wshadow" \ + --packages-skip \ + convex_plane_decomposition \ + convex_plane_decomposition_ros \ No newline at end of file diff --git a/docker/extra_rosdeps.yaml b/docker/extra_rosdeps.yaml new file mode 100644 index 00000000..5081f24b --- /dev/null +++ b/docker/extra_rosdeps.yaml @@ -0,0 +1,12 @@ +numpy_lessthan_2: + ubuntu: + pip: + packages: ["numpy<2.0.0"] +simple-parsing: + ubuntu: + pip: + packages: [simple-parsing] +cupy-cuda12x: + ubuntu: + pip: + packages: [cupy-cuda12x] \ No newline at end of file diff --git a/docker/run.sh b/docker/run.sh new file mode 100755 index 00000000..125928fd --- /dev/null +++ b/docker/run.sh @@ -0,0 +1,49 @@ +#!/bin/bash +IMAGE_NAME="mktk1117/elevation_mapping_cupy:latest" + +# Define environment variables for enabling graphical output for the container. +XSOCK=/tmp/.X11-unix +XAUTH=/tmp/.docker.xauth +if [ ! -f $XAUTH ] +then + touch $XAUTH + xauth_list=$(xauth nlist :0 | sed -e 's/^..../ffff/') + xauth nlist $DISPLAY | sed -e 's/^..../ffff/' | xauth -f $XAUTH nmerge - + chmod a+r $XAUTH +fi + +#== +# Launch container +#== + +# Create symlinks to user configs within the build context. +mkdir -p .etc && cd .etc +ln -sf /etc/passwd . +ln -sf /etc/shadow . +ln -sf /etc/group . +cd .. + +# Launch a container from the prebuilt image. +echo "---------------------" +RUN_COMMAND="docker run \ + --volume=$XSOCK:$XSOCK:rw \ + --volume=$XAUTH:$XAUTH:rw \ + --env="QT_X11_NO_MITSHM=1" \ + --env="XAUTHORITY=$XAUTH" \ + --env="DISPLAY=$DISPLAY" \ + --ulimit rtprio=99 \ + --cap-add=sys_nice \ + --privileged \ + --net=host \ + -eHOST_USERNAME=$(whoami) \ + -v$HOME:$HOME \ + -v$(pwd)/.etc/shadow:/etc/shadow \ + -v$(pwd)/.etc/passwd:/etc/passwd \ + -v$(pwd)/.etc/group:/etc/group \ + -v/media:/media \ + --gpus all \ + -it $IMAGE_NAME" +echo -e "[run.sh]: \e[1;32mThe final run command is\n\e[0;35m$RUN_COMMAND\e[0m."
+$RUN_COMMAND +echo -e "[run.sh]: \e[1;32mDocker terminal closed.\e[0m" +# --entrypoint=$ENTRYPOINT \ \ No newline at end of file diff --git a/docker/setup.sh b/docker/setup.sh new file mode 100755 index 00000000..128e53bc --- /dev/null +++ b/docker/setup.sh @@ -0,0 +1,7 @@ +#!/bin/bash +cd ~/workspace +vcs import < src/elevation_mapping_cupy/docker/src.repos src/ --recursive -w $(($(nproc)/2)) + +sudo apt update +rosdep update +rosdep install --from-paths src --ignore-src -y -r \ No newline at end of file diff --git a/docker/src.repos b/docker/src.repos new file mode 100644 index 00000000..b0d9f6c3 --- /dev/null +++ b/docker/src.repos @@ -0,0 +1,15 @@ +repositories: + # elevation_mapping_cupy: + # type: git + # url: https://github.com/jwag/elevation_mapping_cupy.git + # version: ros2_humble + # ros2_numpy needed by elevation_mapping_cupy + ros2_numpy: + type: git + url: https://github.com/Box-Robotics/ros2_numpy.git + version: humble + # added for testing elevation_mapping_cupy + turtlebot3_simulations: + type: git + url: https://github.com/jwag/turtlebot3_simulations.git + version: humble_realsense_depth diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 00000000..d0c3cbf1 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = source +BUILDDIR = build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/make.bat b/docs/make.bat new file mode 100644 index 00000000..747ffb7b --- /dev/null +++ b/docs/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=source +set BUILDDIR=build + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. 
+ echo.If you don't have Sphinx installed, grab it from + echo.https://www.sphinx-doc.org/ + exit /b 1 +) + +if "%1" == "" goto help + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/docs/media/convex_approximation.gif b/docs/media/convex_approximation.gif new file mode 100644 index 00000000..11b19c24 Binary files /dev/null and b/docs/media/convex_approximation.gif differ diff --git a/docs/media/main_mem.png b/docs/media/main_mem.png new file mode 100644 index 00000000..cda6a500 Binary files /dev/null and b/docs/media/main_mem.png differ diff --git a/docs/media/main_repo.png b/docs/media/main_repo.png new file mode 100644 index 00000000..22ecf718 Binary files /dev/null and b/docs/media/main_repo.png differ diff --git a/docs/media/overview.png b/docs/media/overview.png new file mode 100644 index 00000000..94d9d0b5 Binary files /dev/null and b/docs/media/overview.png differ diff --git a/docs/media/processing_time.png b/docs/media/processing_time.png new file mode 100644 index 00000000..d8c9370f Binary files /dev/null and b/docs/media/processing_time.png differ diff --git a/docs/media/real.png b/docs/media/real.png new file mode 100644 index 00000000..8722fb7f Binary files /dev/null and b/docs/media/real.png differ diff --git a/docs/media/sim.png b/docs/media/sim.png new file mode 100644 index 00000000..362df113 Binary files /dev/null and b/docs/media/sim.png differ diff --git a/docs/media/turtlebot.png b/docs/media/turtlebot.png new file mode 100644 index 00000000..e2b550aa Binary files /dev/null and b/docs/media/turtlebot.png differ diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 00000000..5498636f --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,15 @@ +###### Requirements without Version Specifiers ######` +numpy +scipy +dataclasses +ruamel.yaml +opencv-python +simple-parsing +scikit-image +matplotlib +catkin-tools +catkin_pkg +# cupy + +###### Requirements with Version Specifiers ######` +# shapely==1.7.1 diff --git a/docs/source/conf.py b/docs/source/conf.py new file mode 100644 index 00000000..2d0c9090 --- /dev/null +++ b/docs/source/conf.py @@ -0,0 +1,89 @@ +# Configuration file for the Sphinx documentation builder. 
+# +# For the full list of built-in configuration values, see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Project information ----------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information + +import os, sys + +sys.path.append(os.path.join(os.path.dirname(__file__), '../../elevation_mapping_cupy/script')) +sys.path.append(os.path.join(os.path.dirname(__file__), '../../sensor_processing/semantic_sensor/script')) + +autodoc_mock_imports = [ + "cupy", + "cupyx", + "rospy", + "ros_numpy" + "torchvision", + "numpy", + "scipy", + "sklearn", + "dataclasses", + "ruamel.yaml", + "opencv-python", + "simple-parsing", + "scikit-image", + "matplotlib", + "catkin-tools", + "catkin_pkg", + "detectron2", + "torch", + "shapely", + "simple_parsing", + ] + +on_rtd = os.environ.get("READTHEDOCS", None) == "True" + +if not on_rtd: # only import and set the theme if we're building docs locally + import sphinx_rtd_theme + + html_theme = "sphinx_rtd_theme" + html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] + +project = "elevation_mapping_cupy" +copyright = "2022, Takahiro Miki, Gian Erni" +author = "Takahiro Miki, Gian Erni" + +# -- General configuration --------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration + +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.coverage", + "sphinx.ext.napoleon", + "sphinx_copybutton", +] + +templates_path = ["_templates"] +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] + +html_theme_options = { + "analytics_anonymize_ip": False, + "logo_only": False, + "display_version": False, + "prev_next_buttons_location": "bottom", + "style_external_links": False, + "vcs_pageview_mode": "", + # "style_nav_header_background": "#A00000", + # Toc options + "collapse_navigation": True, + "sticky_navigation": True, + "navigation_depth": 4, + "includehidden": True, + "titles_only": False, +} + +# -- Options for HTML output ------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output + +html_static_path = ["_static"] + +autodoc_default_options = { + "members": True, + "member-order": "bysource", + "special-members": "__init__", + "undoc-members": True, + "exclude-members": "__weakref__", +} diff --git a/docs/source/documentation.rst b/docs/source/documentation.rst new file mode 100644 index 00000000..3d057a63 --- /dev/null +++ b/docs/source/documentation.rst @@ -0,0 +1,69 @@ +################################################## +Multi-modal elevation mapping's documentation +################################################## +Welcome to elevation mapping documentation + +.. image:: https://github.com/leggedrobotics/elevation_mapping_semantic_cupy/actions/workflows/python-tests.yml/badge.svg + :target: https://github.com/leggedrobotics/elevation_mapping_semantic_cupy/actions/workflows/python-tests.yml/badge.svg + :alt: python tests + +.. 
image:: https://github.com/leggedrobotics/elevation_mapping_semantic_cupy/actions/workflows/documentation.yml/badge.svg + :target: https://github.com/leggedrobotics/elevation_mapping_semantic_cupy/actions/workflows/documentation.yml/badge.svg + :alt: documentation + +Index +--------------- + +| :doc:`getting_started/introduction` - What is elevation mapping cupy +| :doc:`getting_started/installation` - How to install the elevation map +| :doc:`getting_started/tutorial` - How to launch the first elevation map + + +This is a ROS package for elevation mapping on GPU. The elevation mapping code is written in Python and uses CuPy for GPU computation. The +plane segmentation is done independently and runs on CPU. Once the plane segmentation is computed, local convex approximations of the +terrain can be efficiently generated. + +.. image:: ../media/main_repo.png + :alt: Elevation map examples +.. image:: ../media/main_mem.png + :alt: Overview of the project + + +Citing +--------------- +If you use the elevation mapping cupy, please cite the following paper: + +.. hint:: + + Elevation Mapping for Locomotion and Navigation using GPU `Link `_ + + Takahiro Miki, Lorenz Wellhausen, Ruben Grandia, Fabian Jenelten, Timon Homberger, Marco Hutter + +.. code-block:: + + @misc{mikielevation2022, + doi = {10.48550/ARXIV.2204.12876}, + author = {Miki, Takahiro and Wellhausen, Lorenz and Grandia, Ruben and Jenelten, Fabian and Homberger, Timon and Hutter, Marco}, + keywords = {Robotics (cs.RO), FOS: Computer and information sciences, FOS: Computer and information sciences}, + title = {Elevation Mapping for Locomotion and Navigation using GPU}, + publisher = {International Conference on Intelligent Robots and Systems (IROS)}, + year = {2022}, + } + +If you use the color or semantic layers of the multi-modal elevation mapping, please also cite: + +.. hint:: + + MEM: Multi-Modal Elevation Mapping for Robotics and Learning `Link `_ + + Gian Erni, Jonas Frey, Takahiro Miki, Matias Mattamala, Marco Hutter + +.. code-block:: + + @misc{Erni2023-bs, + title = "{MEM}: {Multi-Modal} Elevation Mapping for Robotics and Learning", + author = "Erni, Gian and Frey, Jonas and Miki, Takahiro and Mattamala, Matias and Hutter, Marco", + publisher = {International Conference on Intelligent Robots and Systems (IROS)}, + year = {2023}, + } diff --git a/docs/source/getting_started/cuda_installation.rst b/docs/source/getting_started/cuda_installation.rst new file mode 100644 index 00000000..0c4ede99 --- /dev/null +++ b/docs/source/getting_started/cuda_installation.rst @@ -0,0 +1,40 @@ +Cuda installation +================================================================== +.. _cuda_installation: + + +CUDA +------------------------------------------------------------------- + +You can download CUDA 10.2 from `here `_ +and follow the instructions below. + +.. code-block:: bash + + wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/cuda-ubuntu1804.pin + + sudo mv cuda-ubuntu1804.pin /etc/apt/preferences.d/cuda-repository-pin-600 + + sudo apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/7fa2af80.pub + + sudo add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/ /" + + sudo apt-get update + + sudo apt-get -y install cuda + + +cuDNN +------------------------------------------------------------------- + +You can download the specific version from `here `_.
+For example, the tested version is `libcudnn8_8.0.0.180-1+cuda10.2_amd64.deb`. + +Then install the packages using the commands below. + +.. code-block:: bash + + sudo dpkg -i libcudnn8_8.0.0.180-1+cuda10.2_amd64.deb + + sudo dpkg -i libcudnn8-dev_8.0.0.180-1+cuda10.2_amd64.deb + diff --git a/docs/source/getting_started/installation.rst b/docs/source/getting_started/installation.rst new file mode 100644 index 00000000..a32cdce2 --- /dev/null +++ b/docs/source/getting_started/installation.rst @@ -0,0 +1,297 @@ +.. _installation: + + +Installation +****************************************************************** + +This section provides instructions for installing the necessary dependencies for the project. The installation process includes setting up CUDA & cuDNN, installing Python dependencies, and configuring CuPy. +Follow the instructions carefully to avoid any installation issues. + + +Docker +================================================================== +We provide a docker setup for the project. +To build the docker image, run the following command: + + +.. code-block:: bash + + cd /docker + ./build.sh + + +To run the docker image, run the following command: + + +.. code-block:: bash + + cd /docker + ./run.sh + +This will start the docker container and mount the home directory of the host machine into the docker container. +After you clone the project repository into your catkin_ws, you can build the packages inside the docker container. +To build the packages inside the docker container, follow the instructions in the `Build section <#build>`_ of this document. + + +On Desktop or Laptop with NVIDIA GPU +================================================================== + +CUDA & cuDNN +------------------------------------------------------------------ + +If you do not have CUDA and cuDNN installed, please install them first. +The tested versions are CUDA 10.2 and 11.6. + +`CUDA `_ +`cuDNN `_ + + +You can check how to install :ref:`here`. + +Python dependencies +------------------------------------------------------------------ + +You will need + +* `cupy `_ +* `numpy `_ +* `scipy `_ +* `shapely==1.7.1 `_ + +For the traversability filter, either of + +* `torch `_ +* `chainer `_ + +Optionally, OpenCV for the inpainting filter. + +* `opencv-python `_ + +Install `numpy`, `scipy`, `shapely`, `opencv-python` with the following command. + +.. code-block:: bash + + pip3 install -r requirements.txt + + +Cupy +------------------------------------------------------------------- + + +CuPy can be installed for specific CUDA versions. (On Jetson, only building from source, i.e. `pip install cupy`, may work.) +For CUDA 10.2 + +.. code-block:: bash + + pip install cupy-cuda102 + +For CUDA 11.0 + +.. code-block:: bash + + pip install cupy-cuda110 + +For CUDA 11.1 + +.. code-block:: bash + + pip install cupy-cuda111 + +For CUDA 11.2 + +.. code-block:: bash + + pip install cupy-cuda112 + +For CUDA 11.3 + +.. code-block:: bash + + pip install cupy-cuda113 + +For CUDA 11.4 + +.. code-block:: bash + + pip install cupy-cuda114 + +For CUDA 11.5 + +.. code-block:: bash + + pip install cupy-cuda115 + +For CUDA 11.6 + +.. code-block:: bash + + pip install cupy-cuda116 + +(Install CuPy from source) + +.. code-block:: bash + + pip install cupy
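+Before building the ROS packages, it can be worth verifying that CuPy actually matches the installed CUDA toolkit. A quick sanity check such as the following can be used (this snippet is only an illustration and not part of the package):
+
+.. code-block:: python
+
+    # Sanity check: CuPy sees the GPU and can run a trivial kernel.
+    import cupy as cp
+
+    print("CUDA runtime version:", cp.cuda.runtime.runtimeGetVersion())
+    print("Device:", cp.cuda.runtime.getDeviceProperties(0)["name"])
+
+    x = cp.arange(10, dtype=cp.float32)
+    assert float(x.sum()) == 45.0  # raises if the CUDA installation is broken
+    print("CuPy is working.")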
+ +Traversability filter +------------------------------------------------------------------- + +You can choose either PyTorch or Chainer to run the CNN-based traversability filter. +Install it by following the official documentation: + +* `torch `_ +* `chainer `_ + +PyTorch uses ~2GB more GPU memory than Chainer, but runs a bit faster. +Use the parameter `use_chainer` to select which backend to use. + +ROS package dependencies +------------------------------------------------------------------- + +* `pybind11_catkin `_ +* `grid_map `_ + +.. code-block:: bash + + sudo apt install ros-noetic-pybind11-catkin + sudo apt install ros-noetic-grid-map-core ros-noetic-grid-map-msgs ros-noetic-grid-map-ros + + +On Jetson +================================================================== + +CUDA CuDNN +------------------------------------------------------------------- + +`CUDA` and `cuDNN` can be installed via apt. They come with nvidia-jetpack. The tested version is Jetpack 4.5 with L4T 32.5.0. + +Python dependencies +------------------------------------------------------------------- + +On Jetson, you need the build for its CPU architecture. + +Please check the `official document `_ for the latest PyTorch information. + +Current for Jetson Orin on Ubuntu 20.04: + +.. code-block:: bash + + export TORCH_INSTALL=https://developer.download.nvidia.cn/compute/redist/jp/v511/pytorch/torch-2.0.0+nv23.05-cp38-cp38-linux_aarch64.whl + pip install Cython + python -m pip install numpy=='1.24.1' + python -m pip install --no-cache $TORCH_INSTALL + +Current for Jetson Xavier on Ubuntu 18.04: + +.. code-block:: bash + + wget https://nvidia.box.com/shared/static/p57jwntv436lfrd78inwl7iml6p13fzh.whl -O torch-1.8.0-cp36-cp36m-linux_aarch64.whl + pip3 install Cython + pip3 install numpy==1.19.5 torch-1.8.0-cp36-cp36m-linux_aarch64.whl + + +Also, you need to install cupy with + +.. code-block:: bash + + pip3 install cupy + + +This builds the package from source, so it may take some time. + +ROS dependencies +----------------------- + +* `pybind11_catkin `_ +* `grid_map `_ + +.. code-block:: bash + + sudo apt install ros-melodic-pybind11-catkin + sudo apt install ros-melodic-grid-map-core ros-melodic-grid-map-msgs ros-melodic-grid-map-ros + + +Also, on Jetson you need Fortran (it should already be installed). + +.. code-block:: bash + + sudo apt install gfortran + + +If the Jetson is set up with Jetpack 4.5 and ROS Melodic, the following package is additionally required: + +.. code-block:: bash + + git clone git@github.com:ros/filters.git -b noetic-devel + + +Plane segmentation dependencies +================================================================== + +OpenCV +------------------------------------------------------------------- + +.. code-block:: bash + + sudo apt install libopencv-dev + +Eigen +------------------------------------------------------------------- + +.. code-block:: bash + + sudo apt install libeigen3-dev + +CGAL +------------------------------------------------------------------- + +CGAL5 is required. It will be automatically downloaded and installed into the catkin workspace by the cgal5_catkin package. Make sure you +have the third-party libraries installed on your machine: + +.. code-block:: bash + + sudo apt install libgmp-dev + sudo apt install libmpfr-dev + sudo apt install libboost-all-dev + + +Semantic Sensors +================================================================== +The elevation mapping node can receive multi-modal point cloud and image topics. +In this example, we use semantic segmentation models to process color images and publish those topics. + +Python dependencies +------------------------------------------------------------------- + +..
code-block:: bash + + pip3 install scikit-learn + +Torchvision (for Jetson Orin on Ubuntu 20.04) + +.. code-block:: bash + + git clone --branch release/0.15 https://github.com/pytorch/vision torchvision + cd torchvision/ + export BUILD_VERSION=0.15.1 + python3 setup.py install --user + +Detectron2 + +.. code-block:: bash + + python3 -m pip install 'git+https://github.com/facebookresearch/detectron2.git' + + +Build +================================================================== +After installing all the dependencies, you can build the packages. +Clone the project repository into your catkin_ws/src directory. +Then, build the packages with catkin. + +.. code-block:: bash + + cd + catkin build elevation_mapping_cupy # The core package + catkin build convex_plane_decomposition_ros # If you want to use plane segmentation + catkin build semantic_sensor # If you want to use semantic sensors diff --git a/docs/source/getting_started/introduction.rst b/docs/source/getting_started/introduction.rst new file mode 100644 index 00000000..90689dfe --- /dev/null +++ b/docs/source/getting_started/introduction.rst @@ -0,0 +1,132 @@ +.. _introduction: + + + +Introduction +****************************************************************** +The Elevation Mapping CuPy software package represents an advancement in robotic navigation and locomotion. +Integrating with the Robot Operating System (ROS) and utilizing GPU acceleration, this framework enhances point cloud registration and ray casting, +crucial for efficient and accurate robotic movement, particularly in legged robots. + +Used for Various Real-World Applications +------------------------------------------------------------------- +This software package has been rigorously tested in challenging environments, including the DARPA Subterranean Challenge, +demonstrating its effectiveness in complex, real-world scenarios. +It supports a wide range of applications, from underground exploration to advanced research in legged locomotion and autonomous navigation. + +* **DARPA Subterranean Challenge**: This package was used by Team CERBERUS in the DARPA Subterranean Challenge. + + `Team Cerberus `_ + + `CERBERUS in the DARPA Subterranean Challenge (Science Robotics) `_ + +* **ESA / ESRIC Space Resources Challenge**: This package was used for the Space Resources Challenge. + + `Scientific exploration of challenging planetary analog environments with a team of legged robots (Science Robotics) `_ + + + + +Key Features +------------------------------------------------------------------- +* **Height Drift Compensation**: Tackles state estimation drifts that can create mapping artifacts, ensuring more accurate terrain representation. + +* **Visibility Cleanup and Artifact Removal**: Raycasting methods and an exclusion zone feature are designed to remove virtual artifacts and correctly interpret overhanging obstacles, preventing misidentification as walls. + +* **Learning-based Traversability Filter**: Assesses terrain traversability using local geometry, improving path planning and navigation. + +* **Versatile Locomotion Tools**: Incorporates smoothing filters and plane segmentation, optimizing movement across various terrains. + +* **Multi-Modal Elevation Map (MEM) Framework**: Allows seamless integration of diverse data like geometry, semantics, and RGB information, enhancing multi-modal robotic perception. + +* **GPU-Enhanced Efficiency**: Facilitates rapid processing of large data structures, crucial for real-time applications.
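+As a minimal illustration of the kind of cell-wise, GPU-parallel map update that makes this efficiency possible, consider the sketch below. It is purely illustrative (the names, shapes, and the simple 1D Kalman-style fusion are assumptions made for the example), not the package's actual kernel:
+
+.. code-block:: python
+
+    # Illustrative sketch: fusing point measurements into map cells on the GPU.
+    import cupy as cp
+
+    H, W = 200, 200
+    height = cp.zeros((H, W), dtype=cp.float32)        # elevation layer
+    variance = cp.full((H, W), 1e3, dtype=cp.float32)  # per-cell uncertainty
+
+    def update_cells(rows, cols, z, sigma2):
+        """1D Kalman-style update of the hit cells; duplicate hits per cell
+        are not accumulated here, for simplicity."""
+        k = variance[rows, cols] / (variance[rows, cols] + sigma2)
+        height[rows, cols] += k * (z - height[rows, cols])
+        variance[rows, cols] *= 1.0 - k
+
+    # 100k synthetic measurements, processed in parallel on the GPU.
+    n = 100_000
+    rows = cp.random.randint(0, H, n)
+    cols = cp.random.randint(0, W, n)
+    z = cp.random.randn(n).astype(cp.float32)
+    update_cells(rows, cols, z, sigma2=0.01)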
+ +Overview +------------------------------------------------------------------- + +.. image:: ../../media/overview.png + :alt: Overview of multi-modal elevation map structure + +Overview of our multi-modal elevation map structure. The framework takes multi-modal images (purple) and multi-modal (blue) point clouds as +input. This data is input into the elevation map by first associating it to the cells and then fusing it with different fusion algorithms into the various +layers of the map. Finally, the map can be post-processed with various custom plugins to generate new layers (e.g. traversability) or to process layers for +external components (e.g. line detection). + +Subscribed Topics +------------------------------------------------------------------- +The subscribed topics are specified under the **subscribers** parameter. + +An example setup is in **elevation_mapping_cupy/config/core/example_setup.yaml**. + +* **/** ([sensor_msgs/PointCloud2]) + + The point cloud topic. It can have additional channels for RGB, intensity, etc. + +* **/** ([sensor_msgs/Image]) + + The image topic. It can have additional channels for RGB, semantic probabilities, image features etc. + +* **/** ([sensor_msgs/CameraInfo]) + + The camera info topic. It is used to project the point cloud into the image plane. + +* **/** ([elevation_map_msgs/ChannelInfo]) + + If this topic is configured, the node will subscribe to it and use the information to associate the image channels to the elevation map layers. + +* **/tf** ([tf/tfMessage]) + + The transformation tree. + +* The plane segmentation node subscribes to an elevation map topic ([grid_map_msg/GridMap]). This can be configured in + **convex_plane_decomposition_ros/config/core/parameters.yaml** + +Published Topics +------------------------------------------------------------------- +For elevation_mapping_cupy, topics are published as configured in the rosparam. +You can specify which layers to publish at which rate. + +Under publishers, you can specify the topic_name, layers, basic_layers, and fps. + +.. code-block:: yaml + + publishers: + your_topic_name: + layers: [ 'list_of_layer_names', 'layer1', 'layer2' ] # Choose from 'elevation', 'variance', 'traversability', 'time' + plugin layers + basic_layers: [ 'list of basic layers', 'layer1' ] # basic_layers for valid cell computation (e.g. Rviz): Choose a subset of `layers`. + fps: 5.0 # Publish rate. Use smaller value than `map_acquire_fps`. + + +An example setting is in `config/parameters.yaml`. + +* **elevation_map_raw** ([grid_map_msg/GridMap]) + + The entire elevation map. + +* **elevation_map_recordable** ([grid_map_msg/GridMap]) + + The entire elevation map with a slower update rate for visualization and logging. + +* **elevation_map_filter** ([grid_map_msg/GridMap]) + + The maps filtered using plugins. + +The plane segmentation node publishes the following: + +* **planar_terrain** ([convex_plane_decomposition_msgs/PlanarTerrain]) + + A custom message that contains the full segmentation as a map together with the boundary information. + +* **filtered_map** ([grid_map_msg/GridMap]) + + A grid map message to visualize the segmentation and some intermediate results. This information is also part of **planar_terrain**. + +* **boundaries** ([visualization_msgs/MarkerArray]) + + A set of polygons that trace the boundaries of the segmented region. Holes and boundaries of a single region are published as separate + markers with the same color.
+ +* **insets** ([visualization_msgs/PolygonArray]) + + A set of polygons that are at a slight inward offset from **boundaries**. There might be more insets than boundaries since the inward + shift can cause a single region to break down into multiple when narrow passages exist. diff --git a/docs/source/getting_started/tutorial.rst b/docs/source/getting_started/tutorial.rst new file mode 100644 index 00000000..12f9eda3 --- /dev/null +++ b/docs/source/getting_started/tutorial.rst @@ -0,0 +1,127 @@ +.. _tutorial: + +Tutorial +****************************************************************** +This tutorial will guide you through the basic usage of elevation mapping cupy. +You will learn how to run the plane segmentation node, the sensor node, and the TurtleBot example. + +If you want to use your own custom plugins, please refer to the :ref:`plugins`. + + +Run +================================================================== + +Basic usage: + +.. code-block:: bash + + roslaunch elevation_mapping_cupy elevation_mapping_cupy.launch + + +For the plane segmentation node: + +.. code-block:: bash + + roslaunch convex_plane_decomposition_ros convex_plane_decomposition.launch + +For the sensor node: + +.. code-block:: bash + + roslaunch semantic_sensor semantic_pointcloud.launch + roslaunch semantic_sensor semantic_image.launch + + +Run TurtleBot example +================================================================== + +First, install the TurtleBot simulation packages. + +.. code-block:: bash + + sudo apt install ros-noetic-turtlebot3-gazebo ros-noetic-turtlebot3-teleop + + +Then, you can run the examples. For the basic version: + +.. code-block:: bash + + export TURTLEBOT3_MODEL=waffle + roslaunch elevation_mapping_cupy turtlesim_simple_example.launch + + +For fusing semantics into the map such as rgb from a multi modal pointcloud: + +.. image:: ../../media/turtlebot.png + :alt: Elevation map examples + +.. code-block:: bash + + export TURTLEBOT3_MODEL=waffle + roslaunch elevation_mapping_cupy turtlesim_semantic_pointcloud_example.launch + +For fusing semantics into the map such as rgb semantics or features from an image: + +.. code-block:: bash + + export TURTLEBOT3_MODEL=waffle + roslaunch elevation_mapping_cupy turtlesim_semantic_image_example.launch + +Or, for the version including plane segmentation: + +.. code-block:: bash + + catkin build convex_plane_decomposition_ros + export TURTLEBOT3_MODEL=waffle + roslaunch elevation_mapping_cupy turtlesim_plane_decomposition_example.launch + + +To control the robot with a keyboard, a new terminal window needs to be opened. +Then run + +.. code-block:: bash + + export TURTLEBOT3_MODEL=waffle + roslaunch turtlebot3_teleop turtlebot3_teleop_key.launch + + +Velocity inputs can be sent to the robot by pressing the keys `a`, `w`, `d`, `x`. To stop the robot completely, press `s`. + + + + +Errors +================================================================== + +If you build with the install flag under ROS Melodic, you might get module-not-found errors: + +.. code-block:: bash + + terminate called after throwing an instance of 'pybind11::error_already_set' + what(): ModuleNotFoundError: No module named 'elevation_mapping_cupy' + +This is because python3 modules are installed into a different location. + +This can be fixed by also including the python3 module location in the `PYTHONPATH`, by adding the following line to the launch file: + +.. code-block:: xml + + <env name="PYTHONPATH" value="/usr/lib/python3/dist-packages:$(env PYTHONPATH)" />
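+To check that the module now resolves from the environment the node runs in, a quick check like the following can help (illustrative only, not part of the package):
+
+.. code-block:: python
+
+    # Run with the same python3 interpreter that the node uses.
+    import importlib.util
+
+    spec = importlib.util.find_spec("elevation_mapping_cupy")
+    print("found:", spec.origin if spec else None)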
+If you get an error such as + +.. code-block:: none + + CMake Error at /usr/share/cmake-3.16/Modules/FindPackageHandleStandardArgs.cmake:146 (message): + Could NOT find PythonInterp: Found unsuitable version "2.7.18", but + required is at least "3" (found /usr/bin/python) + + +build with the following option: + +.. code-block:: bash + + catkin build elevation_mapping_cupy -DPYTHON_EXECUTABLE=$(which python3) + + + diff --git a/docs/source/index.rst b/docs/source/index.rst new file mode 100644 index 00000000..d6566c77 --- /dev/null +++ b/docs/source/index.rst @@ -0,0 +1,39 @@ +.. include:: documentation.rst + +.. toctree:: + :maxdepth: 1 + :caption: Overview + + +.. toctree:: + :hidden: + :maxdepth: 3 + :caption: Getting Started + + Introduction + Installation + Tutorial + +.. toctree:: + :hidden: + :maxdepth: 2 + :caption: Usage + + Plugins + Parameters + Plane Segmentation + Semantics + + +.. toctree:: + :hidden: + :maxdepth: 3 + :caption: Library + + Python + + + + + + diff --git a/docs/source/python/elevation_mapping.rst b/docs/source/python/elevation_mapping.rst new file mode 100644 index 00000000..03bb1e28 --- /dev/null +++ b/docs/source/python/elevation_mapping.rst @@ -0,0 +1,29 @@ +.. _elevation_mapping: + +Elevation mapping cupy +****************************************************************** + +.. automodule:: elevation_mapping_cupy.elevation_mapping + :members: + :undoc-members: + :show-inheritance: + +.. automodule:: elevation_mapping_cupy.semantic_map + :members: + :undoc-members: + :show-inheritance: + +.. automodule:: elevation_mapping_cupy.parameter + :members: + :undoc-members: + :show-inheritance: + +.. automodule:: elevation_mapping_cupy.plugins.plugin_manager + :members: + :undoc-members: + :show-inheritance: + +.. automodule:: elevation_mapping_cupy.fusion + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/python/index.rst b/docs/source/python/index.rst new file mode 100644 index 00000000..d2eb5593 --- /dev/null +++ b/docs/source/python/index.rst @@ -0,0 +1,8 @@ +Python software +****************************************************************** + +.. toctree:: + elevation_mapping + semantic_sensor + + diff --git a/docs/source/python/semantic_sensor.rst b/docs/source/python/semantic_sensor.rst new file mode 100644 index 00000000..37904e19 --- /dev/null +++ b/docs/source/python/semantic_sensor.rst @@ -0,0 +1,47 @@ +.. _semantic_sensor: + +Semantic Pointcloud +****************************************************************** + + +.. automodule:: semantic_sensor.pointcloud_node + :members: + :undoc-members: + :show-inheritance: + + +.. automodule:: semantic_sensor.pointcloud_parameters + :members: + :undoc-members: + :show-inheritance: + +.. automodule:: semantic_sensor.image_node + :members: + :undoc-members: + :show-inheritance: + + +.. automodule:: semantic_sensor.image_parameters + :members: + :undoc-members: + :show-inheritance: + +.. automodule:: semantic_sensor.networks + :members: + :undoc-members: + :show-inheritance: + + +.. automodule:: semantic_sensor.utils + :members: + :undoc-members: + :show-inheritance: + + + + + + + + + diff --git a/docs/source/usage/parameters.rst b/docs/source/usage/parameters.rst new file mode 100644 index 00000000..0594a0f4 --- /dev/null +++ b/docs/source/usage/parameters.rst @@ -0,0 +1,46 @@ +.. _parameters: + +Parameters +****************************************************************** + +There are four parameter files: + +#. `Robot Setup Configurations`_ + +#. `Plugin configurations`_ + +#. `Core Parameters`_ + +#.
`Sensor parameter`_ + + +Robot Setup Configurations +================================================ +Configuration such as publishers and subscribers. + +.. include:: ../../../elevation_mapping_cupy/config/core/example_setup.yaml + :code: yaml + +Plugin configurations +================================================ + +More information on the plugins can be found in :ref:`plugins`. + +.. include:: ../../../elevation_mapping_cupy/config/core/plugin_config.yaml + :code: yaml + +Core Parameters +============================================================== + +.. include:: ../../../elevation_mapping_cupy/config/core/core_param.yaml + :code: yaml + +Sensor parameter +================================================ + +More information on the sensor configurations can be found in :ref:`semantics`. + +.. include:: ../../../sensor_processing/semantic_sensor/config/sensor_parameter.yaml + :code: yaml + + diff --git a/docs/source/usage/plane_segmentation.rst b/docs/source/usage/plane_segmentation.rst new file mode 100644 index 00000000..35c2efd7 --- /dev/null +++ b/docs/source/usage/plane_segmentation.rst @@ -0,0 +1,7 @@ +.. _plane_segmentation: + +Plane Segmentation +****************************************************************** + +.. image:: ../media/convex_approximation.gif + :alt: Convex approximation diff --git a/docs/source/usage/plugins.rst b/docs/source/usage/plugins.rst new file mode 100644 index 00000000..4a3f6d9b --- /dev/null +++ b/docs/source/usage/plugins.rst @@ -0,0 +1,127 @@ +.. _plugins: + +Plugins +****************************************************************** + +You can add your own plugin to process the elevation map and publish it as a layer in the GridMap message. + +This page is structured in two parts: + +* `Create a plugin`_ + +* `Existing plugins`_ + + +Create a plugin +================================================================== +You can create your own plugin to process the elevation map and publish it as a layer in the GridMap message. + +Let's look at an example. + +First, create your plugin file in `elevation_mapping_cupy/script/plugins/` and save it as `example.py`. + +.. code-block:: python + + import cupy as cp + from typing import List + from .plugin_manager import PluginBase + + + class NameOfYourPlugin(PluginBase): + def __init__(self, add_value: float = 1.0, **kwargs): + super().__init__() + self.add_value = float(add_value) + + def __call__(self, + elevation_map: cp.ndarray, + layer_names: List[str], + plugin_layers: cp.ndarray, + plugin_layer_names: List[str], + semantic_layers: cp.ndarray, + semantic_layer_names: List[str], + *args + ) -> cp.ndarray: + # Process maps here. + # You can also use the other plugins' data through plugin_layers. + new_elevation = elevation_map[0] + self.add_value + return new_elevation + +Then, add your plugin setting to `config/plugin_config.yaml`: + +.. code-block:: yaml + + example: # Name of your filter + type: "example" # Specify the name of your plugin (your file name). + enable: True # Whether to load this plugin + fill_nan: True # Fill NaNs into invalid cells of the elevation layer. + is_height_layer: True # If this is a height layer (such as elevation) or not (such as traversability) + layer_name: "example_layer" # The layer name. + extra_params: # These params are passed to the plugin class on initialization. + add_value: 2.0 # Example param + + example_large: # You can apply the same filter under a different name. + type: "example" # Specify the name of your plugin (your file name).
+ enable: True # whether to load this plugin + fill_nan: True # Fill nans to invalid cells of elevation layer. + is_height_layer: True # If this is a height layer (such as elevation) or not (such as traversability) + layer_name: "example_layer_large" # The layer name. + extra_params: # These params are passed to the plugin class on initialization. + add_value: 100.0 # Example param with a larger value. + + +Finally, add your layer name to the publishers in your config. You can create a new topic or add it to existing topics. + +.. code-block:: yaml + + plugin_example: # Topic name + layers: [ 'elevation', 'example_layer', 'example_layer_large' ] + basic_layers: [ 'example_layer' ] + fps: 1.0 # The plugin is called with this fps. + + + + + +Existing plugins +================================================================== + +This section lists the plugins that are already installed and available for use. + + + + +1. Min filter +------------------------------------------------------------------- +.. automodule:: elevation_mapping_cupy.plugins.min_filter + :members: + +2. Inpainting +------------------------------------------------------------------- + +.. automodule:: elevation_mapping_cupy.plugins.inpainting + :members: + +3. Smooth Filter +------------------------------------------------------------------- +.. automodule:: elevation_mapping_cupy.plugins.smooth_filter + :members: + +4. Robot centric elevation +------------------------------------------------------------------- +.. automodule:: elevation_mapping_cupy.plugins.robot_centric_elevation + :members: + +5. Semantic Filter +------------------------------------------------------------------- +.. automodule:: elevation_mapping_cupy.plugins.semantic_filter + :members: + +6. Semantic traversability +------------------------------------------------------------------- +.. automodule:: elevation_mapping_cupy.plugins.semantic_traversability + :members: + +7. Features PCA +------------------------------------------------------------------- +.. automodule:: elevation_mapping_cupy.plugins.features_pca + :members: \ No newline at end of file diff --git a/docs/source/usage/semantics.rst b/docs/source/usage/semantics.rst new file mode 100644 index 00000000..24616a10 --- /dev/null +++ b/docs/source/usage/semantics.rst @@ -0,0 +1,79 @@ +.. _semantics: + +Semantics +****************************************************************** + +The elevation map is also able to include semantic information. +The workflow consists of two elements: + +* semantic extension of the elevation map +* semantic pointcloud + +Semantic extension of the elevation map +========================================== + +The semantics of the elevation map can be configured in the sensor parameter file. +The sensor parameter file contains all the topics the map subscribes to. + +The channels and fusion lists are the parameters that define the semantics of the elevation map. +The channels list contains the names of the semantic layers as well as the names of the channels +of the data that the elevation map node subscribes to. +The fusion list indicates the fusion algorithm type which is applied to fuse the +sensor data into the map. + +There are different fusion algorithm types: + +* average +"""""""""""""""""""""""""" + + Averages all the points that fall within the same cell, then blends the result + with the existing cell value via a weighted average (see the sketch below).
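For intuition, here is a rough NumPy sketch of that blend (illustrative only; the actual fusion runs as a GPU kernel, and the weighting used by the node is configured in the parameters):

.. code-block:: python

    import numpy as np

    def fuse_average(cell_value: float, new_points: np.ndarray, weight: float = 0.5) -> float:
        """Average the new points falling in a cell, then blend with the existing value."""
        new_mean = float(new_points.mean())
        return (1.0 - weight) * cell_value + weight * new_mean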
+ + use case: semantic features + +* bayesian_inference +"""""""""""""""""""""""""" + + Employs Gaussian Bayesian inference at each iteration, where the posterior + of the previous iteration is used as the prior for the new iteration. + + use case: semantic features + + +* class_average +"""""""""""""""""""""""""" + + Averages all the points that fall within the same cell, then blends the result + with the existing cell value via a weighted average. If the previous value is zero, + it is given zero weight. + + use case: class probabilities + +* class_bayesian +"""""""""""""""""""""""""" + + Employs Bayesian inference of a categorical distribution with a Dirichlet prior. + The alpha hyperparameters of the Dirichlet prior are updated at every iteration, + such that the posterior of iteration t-1 is the prior of t. + + use case: class probabilities + + +* color +"""""""""""""""""""""""""" + + The color is subscribed as a uint32. The color of the cell is the average of + all the points within that cell. + + use case: rgb color information + +Semantic pointcloud +======================================= + +Sensors do not always directly provide all semantic information. +The semantic pointcloud is a ROS node that subscribes to stereo cameras and generates a +multichannel pointcloud containing semantic information in addition to the geometric position of +the points. +The pointcloud is also configured from the sensor_parameter file. + + diff --git a/elevation_map_msgs/CMakeLists.txt b/elevation_map_msgs/CMakeLists.txt new file mode 100644 index 00000000..08e4467b --- /dev/null +++ b/elevation_map_msgs/CMakeLists.txt @@ -0,0 +1,55 @@ +cmake_minimum_required(VERSION 3.5) +project(elevation_map_msgs) + +find_package(ament_cmake REQUIRED) +find_package(geometry_msgs REQUIRED) +find_package(rosidl_default_generators REQUIRED) +find_package(std_msgs REQUIRED) # Added std_msgs + +set(msg_files + "msg/Statistics.msg" + "msg/ChannelInfo.msg" +) + +set(srv_files + "srv/CheckSafety.srv" + "srv/Initialize.srv" +) + +rosidl_generate_interfaces(${PROJECT_NAME} + ${msg_files} + ${srv_files} + DEPENDENCIES geometry_msgs std_msgs # Added std_msgs +) + +ament_export_dependencies(rosidl_default_runtime) + +ament_package() + +# cmake_minimum_required(VERSION 3.8) +# project(elevation_map_msgs) + +# if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") +# add_compile_options(-Wall -Wextra -Wpedantic) +# endif() + +# # find dependencies +# find_package(ament_cmake REQUIRED) +# find_package(rosidl_default_generators REQUIRED) +# find_package(builtin_interfaces REQUIRED) +# find_package(std_msgs REQUIRED) +# find_package(geometry_msgs REQUIRED) +# find_package(action_msgs REQUIRED) + +# rosidl_generate_interfaces(${PROJECT_NAME} +# "msg/Statistics.msg" +# "msg/ChannelInfo.msg" +# "srv/CheckSafety.srv" +# "srv/Initialize.srv" +# DEPENDENCIES builtin_interfaces geometry_msgs std_msgs action_msgs +# ) + +# ament_export_dependencies(rosidl_default_runtime) + +# ament_package() + diff --git a/elevation_map_msgs/msg/ChannelInfo.msg b/elevation_map_msgs/msg/ChannelInfo.msg new file mode 100644 index 00000000..2e6327ec --- /dev/null +++ b/elevation_map_msgs/msg/ChannelInfo.msg @@ -0,0 +1,2 @@ +std_msgs/Header header +string[] channels # channel names for each layer \ No newline at end of file diff --git a/elevation_map_msgs/msg/Statistics.msg b/elevation_map_msgs/msg/Statistics.msg new file mode 100644 index 00000000..85daaa50 --- /dev/null +++ 
b/elevation_map_msgs/msg/Statistics.msg @@ -0,0 +1,2 @@ +std_msgs/Header header +float64 pointcloud_process_fps diff --git a/elevation_map_msgs/package.xml b/elevation_map_msgs/package.xml new file mode 100644 index 00000000..731eaebb --- /dev/null +++ b/elevation_map_msgs/package.xml @@ -0,0 +1,31 @@ + + + elevation_map_msgs + 0.0.0 + ROS 2 Message definitions for elevation mapping. + Takahiro Miki + Takahiro Miki + MIT + + + ament_cmake + + geometry_msgs + rosidl_default_generators + std_msgs + + geometry_msgs + rosidl_default_runtime + std_msgs + + rosidl_interface_packages + + + ament_lint_auto + ament_lint_common + + + ament_cmake + rosidl_interface_packages + + diff --git a/elevation_map_msgs/srv/CheckSafety.srv b/elevation_map_msgs/srv/CheckSafety.srv new file mode 100644 index 00000000..ed864d4a --- /dev/null +++ b/elevation_map_msgs/srv/CheckSafety.srv @@ -0,0 +1,14 @@ +# Polygons to check +geometry_msgs/PolygonStamped[] polygons +bool compute_untraversable_polygon + +# Results +--- +bool[] is_safe + +# Estimate of the traversability of the path. +# Ranges from 0 to 1 where 0 means not traversable and 1 highly traversable. +float64[] traversability + +# Polygons that are untraversable. +geometry_msgs/PolygonStamped[] untraversable_polygons diff --git a/elevation_map_msgs/srv/Initialize.srv b/elevation_map_msgs/srv/Initialize.srv new file mode 100644 index 00000000..6aafd91b --- /dev/null +++ b/elevation_map_msgs/srv/Initialize.srv @@ -0,0 +1,14 @@ +# Initialization methods +# TODO add more methods. +uint8 POINTS=0 + +uint8 NEAREST=0 +uint8 LINEAR=1 +uint8 CUBIC=2 + +uint8 type +uint8 method +geometry_msgs/PointStamped[] points + +--- +bool success diff --git a/elevation_mapping_cupy/CMakeLists.txt b/elevation_mapping_cupy/CMakeLists.txt new file mode 100644 index 00000000..e2a66c5c --- /dev/null +++ b/elevation_mapping_cupy/CMakeLists.txt @@ -0,0 +1,132 @@ +cmake_minimum_required(VERSION 3.8) +project(elevation_mapping_cupy) + +# # Enable C++11 (or higher if needed for ROS 2 and pybind11) +# set(CMAKE_CXX_STANDARD 11) +# set(CMAKE_CXX_STANDARD_REQUIRED ON) + +# Default to C++14 +if(NOT CMAKE_CXX_STANDARD) + set(CMAKE_CXX_STANDARD 14) +endif() + +# Compiler options +if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") + add_compile_options(-Wall -Wextra -Wpedantic) +endif() + +# Additional dependencies +# find_package(Python COMPONENTS Interpreter Development) +find_package(PythonInterp 3 REQUIRED) +find_package(pybind11 CONFIG REQUIRED) +find_package(Eigen3 REQUIRED) +find_package(OpenCV REQUIRED) + +# Find pybind11 +message([MAIN] "Found pybind11 v${pybind11_VERSION}: ${pybind11_INCLUDE_DIRS}") +message([MAIN] "pybind11_INCLUDE_DIRS = ${pybind11_INCLUDE_DIRS}") +message([MAIN] "pybind11_LIBRARIES = ${pybind11_LIBRARIES}") + +# Find ROS 2 dependencies +find_package(ament_cmake REQUIRED) +find_package(ament_cmake_python REQUIRED) +find_package(rclcpp REQUIRED) +find_package(rclpy REQUIRED) +find_package(builtin_interfaces REQUIRED) +find_package(message_filters REQUIRED) +find_package(std_msgs REQUIRED) +find_package(std_srvs REQUIRED) +find_package(sensor_msgs REQUIRED) +find_package(grid_map_msgs REQUIRED) +find_package(geometry_msgs REQUIRED) +find_package(elevation_map_msgs REQUIRED) +find_package(grid_map_ros REQUIRED) +find_package(image_transport REQUIRED) +find_package(pcl_ros REQUIRED) +find_package(tf2_eigen REQUIRED) +find_package(python_cmake_module REQUIRED) +find_package(point_cloud_transport REQUIRED) + +# List dependencies for 
ament_target_dependencies +set(dependencies + rclcpp + rclpy + std_msgs + std_srvs + builtin_interfaces + geometry_msgs + sensor_msgs + elevation_map_msgs + grid_map_msgs + grid_map_ros + image_transport + pcl_ros + message_filters + tf2_eigen + point_cloud_transport +) + +# Include directories +include_directories( + include + ${PYTHON_INCLUDE_DIRS} + ${Eigen3_INCLUDE_DIRS} + ${OpenCV_INCLUDE_DIRS} + ${pybind11_INCLUDE_DIRS} + ${elevation_map_msgs_INCLUDE_DIRS} +) + +# Declare C++ library +add_library(elevation_mapping_ros + src/elevation_mapping_wrapper.cpp + src/elevation_mapping_ros.cpp +) + +# Link the library with necessary dependencies +target_link_libraries(elevation_mapping_ros ${PYTHON_LIBRARIES} ${OpenCV_LIBRARIES} pybind11::embed) +ament_target_dependencies(elevation_mapping_ros ${dependencies}) + +# Declare C++ executable +add_executable(elevation_mapping_node src/elevation_mapping_node.cpp) + +# Link the executable with the library and dependencies +target_link_libraries(elevation_mapping_node elevation_mapping_ros ${OpenCV_LIBRARIES} pybind11::embed) +ament_target_dependencies(elevation_mapping_node ${dependencies}) + + +# Install targets. Not sure if the other install args below are necessary. +# install( +# TARGETS elevation_mapping_ros elevation_mapping_node +# DESTINATION lib/${PROJECT_NAME} +# ARCHIVE DESTINATION lib +# LIBRARY DESTINATION lib +# RUNTIME DESTINATION bin +# ) + +install(TARGETS elevation_mapping_ros elevation_mapping_node + DESTINATION lib/${PROJECT_NAME} +) + +# install(PROGRAMS +# DESTINATION lib/${PROJECT_NAME} +# ) + +# Install launch, config, and rviz directories +install( + DIRECTORY launch config rviz + DESTINATION share/${PROJECT_NAME} +) + +# TODO: Understand if this line is necessary +# _ament_cmake_python_register_environment_hook() +# For use as a Python module outside of ROS 2 +ament_python_install_package(${PROJECT_NAME}) + +# Install the Python ROS 2 modules +install(PROGRAMS + scripts/elevation_mapping_node.py + DESTINATION lib/${PROJECT_NAME} +) + +# Ament package declaration +ament_package() diff --git a/elevation_mapping_cupy/README.md b/elevation_mapping_cupy/README.md new file mode 100644 index 00000000..671de643 --- /dev/null +++ b/elevation_mapping_cupy/README.md @@ -0,0 +1,28 @@ +# Example 1: Turtle Simple Example +Set the environment variable in the Dockerfile +```dockerfile +ENV TURTLEBOT3_MODEL=waffle_realsense_depth +``` +or in the terminal +```bash +export TURTLEBOT3_MODEL=waffle_realsense_depth +``` +If using zenoh as the RMW, open a separate terminal and run the router: +```bash +ros2 run rmw_zenoh_cpp rmw_zenohd +``` + +Now launch the turtlebot3 in Gazebo with the following command: +```bash +ros2 launch turtlebot3_gazebo turtlebot3_world.launch.py +``` + +Launch the elevation mapping node with the configs for the turtle. 
Set use_python_node to true to use it instead of the cpp node: +```bash +ros2 launch elevation_mapping_cupy elevation_mapping_turtle.launch.py use_python_node:=false +``` + +If you want to drive the turtlebot around using the keyboard then run: +```bash +ros2 run turtlebot3_teleop teleop_keyboard +``` \ No newline at end of file diff --git a/elevation_mapping_cupy/config/core/core_param.yaml b/elevation_mapping_cupy/config/core/core_param.yaml new file mode 100644 index 00000000..9c9f2792 --- /dev/null +++ b/elevation_mapping_cupy/config/core/core_param.yaml @@ -0,0 +1,82 @@ +elevation_mapping_node: + ros__parameters: + #### Basic parameters ######## + voxel_filter_size: 0.1 + average_weight: 0.5 + drift_compensation_variance_inlier: 0.1 + checker_layer: 'elevation' # layer name for checking the validity of the cell. + initialized_variance: 10.0 # initial variance for each cell. + resolution: 0.1 # resolution in m. + map_length: 20.0 # map's size in m. + sensor_noise_factor: 0.05 # point's noise is sensor_noise_factor*z^2 (z is distance from sensor). + mahalanobis_thresh: 2.0 # points outside this distance is outlier. + outlier_variance: 0.01 # if point is outlier, add this value to the cell. + drift_compensation_variance_inler: 0.05 # cells under this value is used for drift compensation. + max_drift: 0.1 # drift compensation happens only the drift is smaller than this value (for safety) + drift_compensation_alpha: 0.1 # drift compensation alpha for smoother update of drift compensation + time_variance: 0.0001 # add this value when update_variance is called. + max_variance: 100.0 # maximum variance for each cell. + initial_variance: 1000.0 # initial variance for each cell. + traversability_inlier: 0.9 # cells with higher traversability are used for drift compensation. + dilation_size: 3 # dilation filter size before traversability filter. + wall_num_thresh: 20 # if there are more points than this value, only higher points than the current height are used to make the wall more sharp. + min_height_drift_cnt: 100 # drift compensation only happens if the valid cells are more than this number. + position_noise_thresh: 0.01 # if the position change is bigger than this value, the drift compensation happens. + orientation_noise_thresh: 0.01 # if the orientation change is bigger than this value, the drift compensation happens. + position_lowpass_alpha: 0.2 # lowpass filter alpha used for detecting movements. + orientation_lowpass_alpha: 0.2 # lowpass filter alpha used for detecting movements. + min_valid_distance: 0.1 # points with shorter distance will be filtered out. + max_height_range: 10.5 # points higher than this value from sensor will be filtered out to disable ceiling. + ramped_height_range_a: 0.3 # if z > max(d - ramped_height_range_b, 0) * ramped_height_range_a + ramped_height_range_c, reject. + ramped_height_range_b: 1.0 # if z > max(d - ramped_height_range_b, 0) * ramped_height_range_a + ramped_height_range_c, reject. + ramped_height_range_c: 0.2 # if z > max(d - ramped_height_range_b, 0) * ramped_height_range_a + ramped_height_range_c, reject. + update_variance_fps: 5.0 # fps for updating variance. + update_pose_fps: 10.0 # fps for updating pose and shift the center of map. + time_interval: 0.1 # Time layer is updated with this interval. + map_acquire_fps: 5.0 # Raw map is fetched from GPU memory in this fps. + publish_statistics_fps: 1.0 # Publish statistics topic in this fps. + recordable_fps: 3.0 # Recordable fps for pointcloud. 
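As an aside, the three `ramped_height_range_*` comments above all describe one rejection rule. A plain-Python sketch under the defaults in this file (the real check runs inside the GPU point-processing kernel):

```python
def reject_by_ramped_height_range(z: float, d: float,
                                  a: float = 0.3, b: float = 1.0, c: float = 0.2) -> bool:
    """Reject a point at height z above the sensor and horizontal distance d
    when it exceeds the distance-dependent ceiling (the ceiling rises with d)."""
    return z > max(d - b, 0.0) * a + c
```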
+ enable_normal_arrow_publishing: false + + max_ray_length: 10.0 # maximum length for ray tracing. + cleanup_step: 0.1 # subtract this value from the validity layer at visibility cleanup. + cleanup_cos_thresh: 0.1 # cosine threshold used at visibility cleanup. + + safe_thresh: 0.7 # if traversability is smaller, it is counted as an unsafe cell. + safe_min_thresh: 0.4 # the polygon is unsafe if any cell's traversability is lower than this. + max_unsafe_n: 10 # if the number of cells under safe_thresh exceeds this value, the polygon is unsafe. + + overlap_clear_range_xy: 4.0 # xy range [m] for clearing overlapped area. This defines the valid area for overlap clearance. (used for multi floor setting) + overlap_clear_range_z: 2.0 # z range [m] for clearing overlapped area. Cells outside this range will be cleared. (used for multi floor setting) + + # Default frame configuration (can be overridden in robot-specific configs) + map_frame: 'odom' # The map frame which the odometry source uses. + base_frame: 'base_link' # The robot's base frame. This frame will be the center of the map. + corrected_map_frame: 'odom' + + #### Feature toggles ######## + enable_edge_sharpen: true + enable_visibility_cleanup: true + enable_drift_compensation: true + enable_overlap_clearance: true + enable_pointcloud_publishing: false + enable_drift_corrected_TF_publishing: false + enable_normal_color: false # If true, the map contains a 'color' layer corresponding to the normals. Add the 'color' layer to the publishers setting if you want to visualize it. + + #### Traversability filter ######## + use_chainer: false # Use chainer as the backend of the traversability filter; if false, pytorch is used. pytorch requires ~2GB more GPU memory than chainer but runs faster. + weight_file: '$(find-pkg-share elevation_mapping_cupy)/config/core/weights.dat' # Weight file for the traversability filter + + #### Upper bound ######## + use_only_above_for_upper_bound: false + + #### Initializer ######## + initialize_method: 'linear' # Choose one from 'nearest', 'linear', 'cubic' + initialize_frame_id: ['base_link'] # A single tf (like ['footprint']) initializes a square around it. + initialize_tf_offset: [0.0, 0.0, 0.0, 0.0] # z direction. Should have the same number of elements as initialize_frame_id. + dilation_size_initialize: 2 # dilation size after the init. + initialize_tf_grid_size: 1.0 # This is not used if the number of tfs is more than 3. + use_initializer_at_start: true # Use the initializer when the node starts. 
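Taken together, the three safety parameters above imply roughly the following polygon check (an illustrative sketch, not the actual CheckSafety service implementation):

```python
import numpy as np

def polygon_is_safe(traversability: np.ndarray,
                    safe_thresh: float = 0.7,
                    safe_min_thresh: float = 0.4,
                    max_unsafe_n: int = 10) -> bool:
    """traversability: values of the map cells inside the queried polygon."""
    if np.any(traversability < safe_min_thresh):
        return False  # any cell below the hard minimum makes the polygon unsafe
    return int(np.sum(traversability < safe_thresh)) <= max_unsafe_n
```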
+ + #### Default Plugins ######## + plugin_config_file: '$(find-pkg-share elevation_mapping_cupy)/config/core/plugin_config.yaml' \ No newline at end of file diff --git a/elevation_mapping_cupy/config/core/example_setup.yaml b/elevation_mapping_cupy/config/core/example_setup.yaml new file mode 100644 index 00000000..c67376af --- /dev/null +++ b/elevation_mapping_cupy/config/core/example_setup.yaml @@ -0,0 +1,60 @@ +elevation_mapping: + ros__parameters: + #### Plugins ######## + plugin_config_file: '$(find_package_share elevation_mapping_cupy)/config/core/plugin_config.yaml' + + #### Channel Fusions ######## + pointcloud_channel_fusions: + rgb: 'color' # 'color' fusion is used for the 'rgb' channel + default: 'average' # 'average' fusion is used for channels not listed here + + image_channel_fusions: + rgb: 'color' # 'color' fusion is used for the 'rgb' channel + default: 'exponential' # 'exponential' fusion is used for channels not listed here + feat_.*: 'exponential' # 'exponential' fusion is also used for any channel starting with 'feat_' Regular expressions can be used for channel names + + #### Subscribers ######## + # pointcloud_sensor_name: + # topic_name: '/sensor/pointcloud_semantic' + # data_type: pointcloud # pointcloud or image + # + # image_sensor_name: + # topic_name: '/camera/image_semantic' + # data_type: image # pointcloud or image + # camera_info_topic_name: '/camera/depth/camera_info' + # channel_info_topic_name: '/camera/channel_info' + + + subscribers: + front_cam: + topic_name: '/camera/depth/points' + data_type: pointcloud + color_cam: # for color camera + topic_name: '/camera/rgb/image_raw' + camera_info_topic_name: '/camera/depth/camera_info' + data_type: image + semantic_cam: # for semantic images + topic_name: '/front_cam/semantic_image' + camera_info_topic_name: '/front_cam/camera/depth/camera_info_resized' + channel_info_topic_name: '/front_cam/channel_info' + data_type: image + + #### Publishers ######## + # topic_name: + # layers: # Choose from 'elevation', 'variance', 'traversability', 'time', 'normal_x', 'normal_y', 'normal_z', 'color', plugin_layer_names + # basic_layers: # basic_layers for valid cell computation (e.g. Rviz): Choose a subset of `layers`. + # fps: # Publish rate. Use smaller value than `map_acquire_fps`. + + publishers: + elevation_map_raw: + layers: ['elevation', 'traversability', 'variance','rgb'] + basic_layers: ['elevation'] + fps: 5.0 + elevation_map_recordable: + layers: ['elevation', 'traversability'] + basic_layers: ['elevation', 'traversability'] + fps: 2.0 + elevation_map_filter: + layers: ['min_filter', 'smooth', 'inpaint', 'elevation'] + basic_layers: ['min_filter'] + fps: 3.0 \ No newline at end of file diff --git a/elevation_mapping_cupy/config/core/plugin_config.yaml b/elevation_mapping_cupy/config/core/plugin_config.yaml new file mode 100644 index 00000000..36a1a580 --- /dev/null +++ b/elevation_mapping_cupy/config/core/plugin_config.yaml @@ -0,0 +1,36 @@ +# min_filter fills in minimum value around the invalid cell. +min_filter: + enable: True # weather to load this plugin + fill_nan: False # Fill nans to invalid cells of elevation layer. + is_height_layer: True # If this is a height layer (such as elevation) or not (such as traversability) + layer_name: "min_filter" # The layer name. + extra_params: # This params are passed to the plugin class on initialization. + dilation_size: 1 # The patch size to apply + iteration_n: 30 # The number of iterations +# Apply smoothing. 
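To illustrate the `min_filter` entry above, here is a rough CPU equivalent of its fill behaviour (a SciPy-based sketch; the actual plugin runs on the GPU, and the smoothing and inpainting entries follow below):

```python
import numpy as np
from scipy.ndimage import minimum_filter

def min_fill(elevation: np.ndarray, invalid: np.ndarray,
             dilation_size: int = 1, iteration_n: int = 30) -> np.ndarray:
    """Iteratively fill invalid cells with the minimum of their valid neighbours."""
    out = elevation.copy()
    out[invalid] = np.inf  # invalid cells must never win the minimum
    todo = invalid.copy()
    for _ in range(iteration_n):
        mins = minimum_filter(out, size=2 * dilation_size + 1)
        newly = todo & np.isfinite(mins)  # invalid cells that now have a valid neighbour
        out[newly] = mins[newly]
        todo[newly] = False
    out[todo] = np.nan  # cells that never saw a valid neighbour stay invalid
    return out
```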
+smooth_filter: + enable: True + fill_nan: False + is_height_layer: True + layer_name: "smooth" + extra_params: + input_layer_name: "min_filter" +# Apply inpainting using opencv +inpainting: + enable: True + fill_nan: False + is_height_layer: True + layer_name: "inpaint" + extra_params: + method: "telea" # telea or ns +# Apply smoothing for inpainted layer +erosion: + enable: True + fill_nan: False + is_height_layer: False + layer_name: "erosion" + extra_params: + input_layer_name: "traversability" + dilation_size: 3 + iteration_n: 20 + reverse: True \ No newline at end of file diff --git a/elevation_mapping_cupy/config/core/weights.dat b/elevation_mapping_cupy/config/core/weights.dat new file mode 100644 index 00000000..13dc7dc6 Binary files /dev/null and b/elevation_mapping_cupy/config/core/weights.dat differ diff --git a/elevation_mapping_cupy/config/setups/anymal/anymal_parameters.yaml b/elevation_mapping_cupy/config/setups/anymal/anymal_parameters.yaml new file mode 100644 index 00000000..8bd080d6 --- /dev/null +++ b/elevation_mapping_cupy/config/setups/anymal/anymal_parameters.yaml @@ -0,0 +1,91 @@ +/elevation_mapping_node: + ros__parameters: + #### Basic parameters ######## + resolution: 0.04 # resolution in m. + map_length: 8 # map's size in m. + sensor_noise_factor: 0.05 # point's noise is sensor_noise_factor*z^2 (z is distance from sensor). + mahalanobis_thresh: 2.0 # points outside this distance is outlier. + outlier_variance: 0.01 # if point is outlier, add this value to the cell. + drift_compensation_variance_inler: 0.05 # cells under this value is used for drift compensation. + max_drift: 0.1 # drift compensation happens only the drift is smaller than this value (for safety) + drift_compensation_alpha: 0.1 # drift compensation alpha for smoother update of drift compensation + time_variance: 0.0001 # add this value when update_variance is called. + max_variance: 100.0 # maximum variance for each cell. + initial_variance: 1000.0 # initial variance for each cell. + traversability_inlier: 0.9 # cells with higher traversability are used for drift compensation. + dilation_size: 3 # dilation filter size before traversability filter. + wall_num_thresh: 20 # if there are more points than this value, only higher points than the current height are used to make the wall more sharp. + min_height_drift_cnt: 100 # drift compensation only happens if the valid cells are more than this number. + position_noise_thresh: 0.01 # if the position change is bigger than this value, the drift compensation happens. + orientation_noise_thresh: 0.01 # if the orientation change is bigger than this value, the drift compensation happens. + position_lowpass_alpha: 0.2 # lowpass filter alpha used for detecting movements. + orientation_lowpass_alpha: 0.2 # lowpass filter alpha used for detecting movements. + min_valid_distance: 0.5 # points with shorter distance will be filtered out. + max_height_range: 1.0 # points higher than this value from sensor will be filtered out to disable ceiling. + ramped_height_range_a: 0.3 # if z > max(d - ramped_height_range_b, 0) * ramped_height_range_a + ramped_height_range_c, reject. + ramped_height_range_b: 1.0 # if z > max(d - ramped_height_range_b, 0) * ramped_height_range_a + ramped_height_range_c, reject. + ramped_height_range_c: 0.2 # if z > max(d - ramped_height_range_b, 0) * ramped_height_range_a + ramped_height_range_c, reject. + update_variance_fps: 5.0 # fps for updating variance. + update_pose_fps: 10.0 # fps for updating pose and shift the center of map. 
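For intuition, the sensor noise model and outlier gate described in the comments above combine roughly like this per cell (an illustrative sketch assuming a Kalman-style fusion; the exact kernel logic may differ):

```python
def fuse_point(z_meas: float, cell_height: float, cell_variance: float,
               distance: float, sensor_noise_factor: float = 0.05,
               mahalanobis_thresh: float = 2.0, outlier_variance: float = 0.01):
    """Sketch: point noise grows as sensor_noise_factor * distance**2; points
    failing the Mahalanobis gate only inflate the cell variance."""
    noise = max(sensor_noise_factor * distance ** 2, 1e-6)
    if abs(z_meas - cell_height) / (cell_variance + noise) ** 0.5 > mahalanobis_thresh:
        return cell_height, cell_variance + outlier_variance  # treated as an outlier
    new_var = 1.0 / (1.0 / cell_variance + 1.0 / noise)  # 1-D variance-weighted fusion
    new_height = new_var * (cell_height / cell_variance + z_meas / noise)
    return new_height, new_var
```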
+ time_interval: 0.1 # Time layer is updated with this interval. + map_acquire_fps: 5.0 # Raw map is fetched from GPU memory in this fps. + publish_statistics_fps: 1.0 # Publish statistics topic in this fps. + + max_ray_length: 10.0 # maximum length for ray tracing. + cleanup_step: 0.1 # subtitute this value from validity layer at visibiltiy cleanup. + cleanup_cos_thresh: 0.1 # subtitute this value from validity layer at visibiltiy cleanup. + + safe_thresh: 0.7 # if traversability is smaller, it is counted as unsafe cell. + safe_min_thresh: 0.4 # polygon is unsafe if there exists lower traversability than this. + max_unsafe_n: 10 # if the number of cells under safe_thresh exceeds this value, polygon is unsafe. + + overlap_clear_range_xy: 4.0 # xy range [m] for clearing overlapped area. this defines the valid area for overlap clearance. (used for multi floor setting) + overlap_clear_range_z: 2.0 # z range [m] for clearing overlapped area. cells outside this range will be cleared. (used for multi floor setting) + + map_frame: 'odom' # The map frame where the odometry source uses. + base_frame: 'base' # The robot's base frame. This frame will be a center of the map. + corrected_map_frame: 'odom_corrected' + + #### Feature toggles ######## + enable_edge_sharpen: true + enable_visibility_cleanup: true + enable_drift_compensation: true + enable_overlap_clearance: true + enable_pointcloud_publishing: false + enable_drift_corrected_TF_publishing: false + enable_normal_color: false # If true, the map contains 'color' layer corresponding to normal. Add 'color' layer to the publishers setting if you want to visualize. + + #### Traversability filter ######## + use_chainer: false # Use chainer as a backend of traversability filter or pytorch. If false, it uses pytorch. pytorch requires ~2GB more GPU memory compared to chainer but runs faster. + weight_file: '$(rospack find elevation_mapping_cupy)/config/core/weights.dat' # Weight file for traversability filter + + #### Upper bound ######## + use_only_above_for_upper_bound: false + + #### Plugins ######## + # plugin_config_file: '$(rospack find elevation_mapping_cupy)/config/anymal_plugin_config.yaml' + + #### Publishers ######## + # topic_name: + # layers: # Choose from 'elevation', 'variance', 'traversability', 'time', 'normal_x', 'normal_y', 'normal_z', 'color', plugin_layer_names + # basic_layers: # basic_layers for valid cell computation (e.g. Rviz): Choose a subset of `layers`. + # fps: # Publish rate. Use smaller value than `map_acquire_fps`. + + # publishers: + # elevation_map_raw: + # layers: ['elevation', 'traversability', 'variance'] + # basic_layers: ['elevation', 'traversability'] + # fps: 5.0 + + # semantic_map_raw: + # layers: ['elevation', 'traversability'] + # basic_layers: ['elevation', 'traversability'] + # fps: 5.0 + + #### Initializer ######## + initialize_method: 'linear' # Choose one from 'nearest', 'linear', 'cubic' + initialize_frame_id: ['RF_FOOT', 'LF_FOOT', 'RH_FOOT', 'LH_FOOT'] # One tf (like ['footprint'] ) initializes a square around it. + initialize_tf_offset: [0.0, 0.0, 0.0, 0.0] # z direction. Should be same number as initialize_frame_id. + dilation_size_initialize: 5 # dilation size after the init. + initialize_tf_grid_size: 0.4 # This is not used if number of tf is more than 3. + use_initializer_at_start: true # Use initializer when the node starts. 
\ No newline at end of file diff --git a/elevation_mapping_cupy/config/setups/anymal/anymal_plugin_config.yaml b/elevation_mapping_cupy/config/setups/anymal/anymal_plugin_config.yaml new file mode 100644 index 00000000..5ffd1006 --- /dev/null +++ b/elevation_mapping_cupy/config/setups/anymal/anymal_plugin_config.yaml @@ -0,0 +1,30 @@ +/elevation_mapping_node: + ros__parameters: + # Settings of the plugins. (The plugins should be stored in script/plugins) + # min_filter fills in minimum value around the invalid cell. + min_filter: + enable: True # weather to load this plugin + fill_nan: False # Fill nans to invalid cells of elevation layer. + is_height_layer: True # If this is a height layer (such as elevation) or not (such as traversability) + layer_name: "min_filter" # The layer name. + extra_params: # This params are passed to the plugin class on initialization. + dilation_size: 1 # The patch size to apply + iteration_n: 30 # The number of iterations + + # Apply smoothing. + smooth_filter: + enable: True + fill_nan: False + is_height_layer: True + layer_name: "smooth" + extra_params: + input_layer_name: "min_filter" + + # Apply inpainting using opencv + inpainting: + enable: True + fill_nan: False + is_height_layer: True + layer_name: "inpaint" + extra_params: + method: "telea" # telea or ns diff --git a/elevation_mapping_cupy/config/setups/anymal/anymal_sensor_parameter.yaml b/elevation_mapping_cupy/config/setups/anymal/anymal_sensor_parameter.yaml new file mode 100644 index 00000000..f68e5864 --- /dev/null +++ b/elevation_mapping_cupy/config/setups/anymal/anymal_sensor_parameter.yaml @@ -0,0 +1,83 @@ +/elevation_mapping_node: + ros__parameters: + plugin_config_file: '$(rospack find elevation_mapping_cupy)/config/setups/anymal/anymal_plugin_config.yaml' + + pointcloud_channel_fusions: + rgb: 'color' + default: 'average' + + image_channel_fusions: + rgb: 'color' + default: 'exponential' + feat_.*: 'exponential' + sem_.*: 'exponential' + + #### Publishers ######## + # topic_name: + # layers: # Choose from 'elevation', 'variance', 'traversability', 'time', 'normal_x', 'normal_y', 'normal_z', 'color', plugin_layer_names + # basic_layers: # basic_layers for valid cell computation (e.g. Rviz): Choose a subset of `layers`. + # fps: # Publish rate. Use smaller value than `map_acquire_fps`. 
+ + publishers: + elevation_map_raw: + layers: ['elevation', 'traversability', 'variance', 'rgb', 'upper_bound'] + basic_layers: ['elevation', 'traversability'] + fps: 5.0 + + elevation_map_recordable: + layers: ['elevation', 'traversability', 'variance', 'rgb'] + basic_layers: ['elevation', 'traversability'] + fps: 2.0 + + filtered_elevation_map: + layers: ['inpaint', 'smooth', 'min_filter', 'upper_bound'] + basic_layers: ['inpaint'] + fps: 5.0 + + #### Subscribers ######## + subscribers: + front_upper_depth: + topic_name: /depth_camera_front_upper/point_cloud_self_filtered + data_type: pointcloud + + front_lower_depth: + topic_name: /depth_camera_front_lower/point_cloud_self_filtered + data_type: pointcloud + + rear_upper_depth: + topic_name: /depth_camera_rear_upper/point_cloud_self_filtered + data_type: pointcloud + + rear_lower_depth: + topic_name: /depth_camera_rear_lower/point_cloud_self_filtered + data_type: pointcloud + + left_depth: + topic_name: /depth_camera_left/point_cloud_self_filtered + data_type: pointcloud + + right_depth: + topic_name: /depth_camera_right/point_cloud_self_filtered + data_type: pointcloud + + front_wide_angle: + topic_name: /wide_angle_camera_front/image_color_rect/compressed + camera_info_topic_name: /wide_angle_camera_front/camera_info + data_type: image + + rear_wide_angle: + topic_name: /wide_angle_camera_rear/image_color_rect/compressed + camera_info_topic_name: /wide_angle_camera_rear/camera_info + data_type: image + + velodyne: + topic_name: /point_cloud_filter/lidar/point_cloud_filtered + data_type: pointcloud + + front_bpearl: + topic_name: /robot_self_filter/bpearl_front/point_cloud + data_type: pointcloud + + rear_bpearl: + topic_name: /robot_self_filter/bpearl_rear/point_cloud + data_type: pointcloud \ No newline at end of file diff --git a/elevation_mapping_cupy/config/setups/menzi/base.yaml b/elevation_mapping_cupy/config/setups/menzi/base.yaml new file mode 100644 index 00000000..9d8234ad --- /dev/null +++ b/elevation_mapping_cupy/config/setups/menzi/base.yaml @@ -0,0 +1,25 @@ +elevation_mapping_node: + ros__parameters: + pointcloud_channel_fusions: + rgb: 'color' + default: 'average' + + image_channel_fusions: + rgb: 'color' + default: 'exponential' + feat_.*: 'exponential' + + subscribers: + front_cam: + topic_name: '/ouster_points_self_filtered' + data_type: pointcloud + + publishers: + elevation_map_raw: + layers: ['elevation', 'traversability', 'variance','rgb'] + basic_layers: ['elevation'] + fps: 5.0 + elevation_map_filter: + layers: ['min_filter', 'smooth', 'inpaint', 'elevation'] + basic_layers: ['min_filter'] + fps: 3.0 \ No newline at end of file diff --git a/elevation_mapping_cupy/config/setups/turtle_bot/plugin_config.yaml b/elevation_mapping_cupy/config/setups/turtle_bot/plugin_config.yaml new file mode 100644 index 00000000..43c7b44e --- /dev/null +++ b/elevation_mapping_cupy/config/setups/turtle_bot/plugin_config.yaml @@ -0,0 +1,68 @@ +/elevation_mapping_node: + ros__parameters: + # Settings of the plugins. (The plugins should be stored in script/plugins) + # min_filter fills in minimum value around the invalid cell. + min_filter: + enable: True # weather to load this plugin + fill_nan: False # Fill nans to invalid cells of elevation layer. + is_height_layer: True # If this is a height layer (such as elevation) or not (such as traversability) + layer_name: "min_filter" # The layer name. + extra_params: # This params are passed to the plugin class on initialization. 
+ dilation_size: 1 # The patch size to apply + iteration_n: 30 # The number of iterations + # Apply smoothing. + smooth_filter: + enable: True + fill_nan: False + is_height_layer: True + layer_name: "smooth" + extra_params: + input_layer_name: "min_filter" + # Apply inpainting using opencv + inpainting: + enable: True + fill_nan: False + is_height_layer: True + layer_name: "inpaint" + extra_params: + method: "telea" # telea or ns + # Apply smoothing for inpainted layer + + robot_centric_elevation: # Use the same name as your file name. + enable: False # weather to load this plugin + fill_nan: False # Fill nans to invalid cells of elevation layer. + is_height_layer: True # If this is a height layer (such as elevation) or not (such as traversability) + layer_name: "robot_centric_elevation" # The layer name. + extra_params: # This params are passed to the plugin class on initialization. + resolution: 0.04 + threshold: 1.1 + use_threshold: True + + semantic_filter: + type: "semantic_filter" + enable: True + fill_nan: False + is_height_layer: False + layer_name: "max_categories" + extra_params: + classes: ['^sem_.*$'] + + semantic_traversability: + type: "semantic_traversability" + enable: False + fill_nan: False + is_height_layer: False + layer_name: "semantic_traversability" + extra_params: + layers: ['traversability','robot_centric_elevation'] + thresholds: [0.3,0.5] + type: ['traversability', 'elevation'] + + features_pca: + type: "features_pca" + enable: True + fill_nan: False + is_height_layer: False + layer_name: "pca" + extra_params: + process_layer_names: ["^feat_.*$"] \ No newline at end of file diff --git a/elevation_mapping_cupy/config/setups/turtle_bot/turtle_bot_semantics_image.yaml b/elevation_mapping_cupy/config/setups/turtle_bot/turtle_bot_semantics_image.yaml new file mode 100644 index 00000000..926c5fa9 --- /dev/null +++ b/elevation_mapping_cupy/config/setups/turtle_bot/turtle_bot_semantics_image.yaml @@ -0,0 +1,46 @@ +/elevation_mapping_node: + ros__parameters: + #### Plugins ######## + plugin_config_file: '$(rospack find elevation_mapping_cupy)/config/setups/turtle_bot/plugin_config.yaml' + + pointcloud_channel_fusions: + none: 'none' + # rgb: 'color' + # default: 'average' + + image_channel_fusions: + rgb: 'color' + default: 'exponential' + feat_.*: 'exponential' + + #### Subscribers ######## + subscribers: + color_cam: # for color camera + topic_name: '/camera/rgb/image_raw' + camera_info_topic_name: '/camera/depth/camera_info' + data_type: image + semantic_cam: # for semantic images + topic_name: '/front_cam/semantic_image' + camera_info_topic_name: '/front_cam/camera/depth/camera_info_resized' + channel_info_topic_name: '/front_cam/channel_info' + data_type: image + front_cam_pointcloud: + topic_name: '/camera/depth/points' + data_type: pointcloud + feat_front: + topic_name: '/front_cam/semantic_seg_feat' + camera_info_topic_name: '/front_cam/camera/depth/camera_info_resized' + channel_info_topic_name: '/front_cam/feat_channel_info' + data_type: image + + + #### Publishers ######## + publishers: + elevation_map_raw: + layers: ['elevation', 'traversability', 'variance','rgb','max_categories', 'pca'] + basic_layers: ['elevation'] + fps: 5.0 + elevation_map_filter: + layers: ['min_filter', 'smooth', 'inpaint', 'elevation','rgb','max_categories'] + basic_layers: ['min_filter'] + fps: 3.0 \ No newline at end of file diff --git a/elevation_mapping_cupy/config/setups/turtle_bot/turtle_bot_semantics_pointcloud.yaml 
b/elevation_mapping_cupy/config/setups/turtle_bot/turtle_bot_semantics_pointcloud.yaml new file mode 100644 index 00000000..ab84f7b3 --- /dev/null +++ b/elevation_mapping_cupy/config/setups/turtle_bot/turtle_bot_semantics_pointcloud.yaml @@ -0,0 +1,30 @@ +/elevation_mapping_node: + ros__parameters: + #### Plugins ######## + plugin_config_file: '$(rospack find elevation_mapping_cupy)/config/setups/turtle_bot/plugin_config.yaml' + + pointcloud_channel_fusions: + rgb: 'color' + default: 'average' + + image_channel_fusions: + rgb: 'color' + default: 'exponential' + + #### Subscribers ######## + subscribers: + front_cam_pointcloud: + topic_name: '/camera/depth/points' + data_type: pointcloud + + + #### Publishers ######## + publishers: + elevation_map_raw: + layers: ['elevation', 'traversability', 'variance','rgb','max_categories'] + basic_layers: ['elevation'] + fps: 5.0 + elevation_map_filter: + layers: ['min_filter', 'smooth', 'inpaint', 'elevation','rgb','max_categories'] + basic_layers: ['min_filter'] + fps: 3.0 \ No newline at end of file diff --git a/elevation_mapping_cupy/config/setups/turtle_bot/turtle_bot_simple.yaml b/elevation_mapping_cupy/config/setups/turtle_bot/turtle_bot_simple.yaml new file mode 100644 index 00000000..6d274f25 --- /dev/null +++ b/elevation_mapping_cupy/config/setups/turtle_bot/turtle_bot_simple.yaml @@ -0,0 +1,26 @@ +/elevation_mapping_node: + ros__parameters: + pointcloud_channel_fusions: + rgb: 'color' + default: 'average' + + image_channel_fusions: + rgb: 'color' + default: 'exponential' + feat_.*: 'exponential' + + subscribers: + front_cam: + topic_name: '/intel_realsense_r200_depth/points' + data_type: pointcloud + channels: ['rgb'] + + publishers: + elevation_map_raw: + layers: ['elevation', 'traversability', 'variance','rgb'] + basic_layers: ['elevation'] + fps: 5.0 + elevation_map_filter: + layers: ['min_filter', 'smooth', 'inpaint', 'elevation'] + basic_layers: ['min_filter'] + fps: 3.0 \ No newline at end of file diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/__init__.py b/elevation_mapping_cupy/elevation_mapping_cupy/__init__.py new file mode 100644 index 00000000..a16e894d --- /dev/null +++ b/elevation_mapping_cupy/elevation_mapping_cupy/__init__.py @@ -0,0 +1,2 @@ +from .parameter import Parameter +from .elevation_mapping import ElevationMap diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/elevation_mapping.py b/elevation_mapping_cupy/elevation_mapping_cupy/elevation_mapping.py new file mode 100644 index 00000000..90b841c7 --- /dev/null +++ b/elevation_mapping_cupy/elevation_mapping_cupy/elevation_mapping.py @@ -0,0 +1,1008 @@ +# +# Copyright (c) 2022, Takahiro Miki. All rights reserved. +# Licensed under the MIT license. See LICENSE file in the project root for details. 
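Given the exports in `__init__.py` above, the core map can in principle be exercised without ROS. A hypothetical minimal loop (the `Parameter` construction here is a placeholder; its required fields, e.g. `weight_file` and `plugin_config_file`, must be populated, as this diff does not show usable defaults):

```python
import numpy as np
from elevation_mapping_cupy import ElevationMap, Parameter

param = Parameter()  # placeholder: configure resolution, weight_file, etc. first
emap = ElevationMap(param)

# Fuse one synthetic scan; columns are x, y, z in the map frame.
points = np.random.rand(1000, 3).astype(np.float32)
R, t = np.eye(3), np.zeros(3)
emap.input_pointcloud(points, ["x", "y", "z"], R, t,
                      position_noise=0.0, orientation_noise=0.0)
elevation = emap.get_elevation()  # elevation layer with the border rows/cols cropped
```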
+# +import os +from typing import List, Any, Tuple, Union + +import numpy as np +import threading +import subprocess + +from elevation_mapping_cupy.traversability_filter import ( + get_filter_chainer, + get_filter_torch, +) +from elevation_mapping_cupy.parameter import Parameter + +from elevation_mapping_cupy.kernels import ( + add_points_kernel, + add_color_kernel, + color_average_kernel, +) + +from elevation_mapping_cupy.kernels import sum_kernel +from elevation_mapping_cupy.kernels import error_counting_kernel +from elevation_mapping_cupy.kernels import average_map_kernel +from elevation_mapping_cupy.kernels import dilation_filter_kernel +from elevation_mapping_cupy.kernels import normal_filter_kernel +from elevation_mapping_cupy.kernels import polygon_mask_kernel +from elevation_mapping_cupy.kernels import image_to_map_correspondence_kernel + +from elevation_mapping_cupy.map_initializer import MapInitializer +from elevation_mapping_cupy.plugins.plugin_manager import PluginManager +from elevation_mapping_cupy.semantic_map import SemanticMap +from elevation_mapping_cupy.traversability_polygon import ( + get_masked_traversability, + is_traversable, + calculate_area, + transform_to_map_position, + transform_to_map_index, +) + +import cupy as cp +import rclpy # Import rclpy for ROS 2 logging + +xp = cp +pool = cp.cuda.MemoryPool(cp.cuda.malloc_managed) +cp.cuda.set_allocator(pool.malloc) + + +class ElevationMap: + """Core elevation mapping class.""" + + def __init__(self, param: Parameter): + """ + + Args: + param (elevation_mapping_cupy.parameter.Parameter): + """ + + self.param = param + self.data_type = self.param.data_type + self.resolution = param.resolution + self.center = xp.array([0, 0, 0], dtype=self.data_type) + self.base_rotation = xp.eye(3, dtype=self.data_type) + self.map_length = param.map_length + self.cell_n = param.cell_n + + self.map_lock = threading.Lock() + self.semantic_map = SemanticMap(self.param) + self.elevation_map = xp.zeros((7, self.cell_n, self.cell_n), dtype=self.data_type) + self.layer_names = [ + "elevation", + "variance", + "is_valid", + "traversability", + "time", + "upper_bound", + "is_upper_bound", + ] + + # buffers + self.traversability_buffer = xp.full((self.cell_n, self.cell_n), xp.nan) + self.normal_map = xp.zeros((3, self.cell_n, self.cell_n), dtype=self.data_type) + # Initial variance + self.initial_variance = param.initial_variance + self.elevation_map[1] += self.initial_variance + self.elevation_map[3] += 1.0 + + # overlap clearance + cell_range = int(self.param.overlap_clear_range_xy / self.resolution) + cell_range = np.clip(cell_range, 0, self.cell_n) + self.cell_min = self.cell_n // 2 - cell_range // 2 + self.cell_max = self.cell_n // 2 + cell_range // 2 + + # Initial mean_error + self.mean_error = 0.0 + self.additive_mean_error = 0.0 + + self.compile_kernels() + + self.compile_image_kernels() + + self.semantic_map.initialize_fusion() + + weight_file = subprocess.getoutput('echo "' + param.weight_file + '"') + param.load_weights(weight_file) + + if param.use_chainer: + self.traversability_filter = get_filter_chainer(param.w1, param.w2, param.w3, param.w_out) + else: + self.traversability_filter = get_filter_torch(param.w1, param.w2, param.w3, param.w_out) + self.untraversable_polygon = xp.zeros((1, 2)) + + # Plugins + self.plugin_manager = PluginManager(cell_n=self.cell_n) + plugin_config_file = subprocess.getoutput('echo "' + param.plugin_config_file + '"') + self.plugin_manager.load_plugin_settings(plugin_config_file) + + self.map_initializer 
= MapInitializer(self.initial_variance, param.initialized_variance, xp=cp, method="points") + + def clear(self): + """Reset all the layers of the elevation & the semantic map.""" + with self.map_lock: + self.elevation_map *= 0.0 + # Initial variance + self.elevation_map[1] += self.initial_variance + self.semantic_map.clear() + + self.mean_error = 0.0 + self.additive_mean_error = 0.0 + + def get_center_position(self, position): + """Return the position of the map center. + + Args: + position (numpy.ndarray): + + """ + position[0][:] = xp.asnumpy(self.center) + + def move(self, delta_position): + """Shift the map along all three axes according to the input. + + Args: + delta_position (numpy.ndarray): + """ + # Shift map using delta position. + delta_position = xp.asarray(delta_position) + delta_pixel = xp.round(delta_position[:2] / self.resolution) + delta_position_xy = delta_pixel * self.resolution + self.center[:2] += xp.asarray(delta_position_xy) + self.center[2] += xp.asarray(delta_position[2]) + dy_dx = xp.array([delta_pixel[1], delta_pixel[0]]) + self.shift_map_xy(dy_dx) + self.shift_map_z(-delta_position[2]) + + def move_to(self, position, R): + """Shift the map to an absolute position and update the rotation of the robot. + + Args: + position (numpy.ndarray): + R (cupy._core.core.ndarray): + """ + # Shift map to the center of robot. + self.base_rotation = xp.asarray(R, dtype=self.data_type) + position = xp.asarray(position) + delta = position - self.center + delta_pixel = xp.around(delta[:2] / self.resolution) + delta_xy = delta_pixel * self.resolution + self.center[:2] += delta_xy + self.center[2] += delta[2] + dy_dx = xp.array([delta_pixel[1], delta_pixel[0]]) + self.shift_map_xy(-dy_dx) + self.shift_map_z(-delta[2]) + + def pad_value(self, x, shift_value, idx=None, value=0.0): + """Create a padding of the map along x,y-axis according to amount that has shifted. + + Args: + x (cupy._core.core.ndarray): + shift_value (cupy._core.core.ndarray): + idx (Union[None, int, None, None]): + value (float): + """ + if idx is None: + if shift_value[0] > 0: + x[:, : shift_value[0], :] = value + elif shift_value[0] < 0: + x[:, shift_value[0] :, :] = value + if shift_value[1] > 0: + x[:, :, : shift_value[1]] = value + elif shift_value[1] < 0: + x[:, :, shift_value[1] :] = value + else: + if shift_value[0] > 0: + x[idx, : shift_value[0], :] = value + elif shift_value[0] < 0: + x[idx, shift_value[0] :, :] = value + if shift_value[1] > 0: + x[idx, :, : shift_value[1]] = value + elif shift_value[1] < 0: + x[idx, :, shift_value[1] :] = value + + def shift_map_xy(self, delta_pixel): + """Shift the map along the horizontal axes according to the input. + + Args: + delta_pixel (cupy._core.core.ndarray): + + """ + shift_value = delta_pixel.astype(cp.int32) + if cp.abs(shift_value).sum() == 0: + return + with self.map_lock: + self.elevation_map = cp.roll(self.elevation_map, shift_value, axis=(1, 2)) + self.pad_value(self.elevation_map, shift_value, value=0.0) + self.pad_value(self.elevation_map, shift_value, idx=1, value=self.initial_variance) + self.semantic_map.shift_map_xy(shift_value) + + def shift_map_z(self, delta_z): + """Shift the relevant layers along the vertical axis. 
+ + Args: + delta_z (cupy._core.core.ndarray): + """ + with self.map_lock: + # elevation + self.elevation_map[0] += delta_z + # upper bound + self.elevation_map[5] += delta_z + + def compile_kernels(self): + """Compile all kernels belonging to the elevation map.""" + + self.new_map = cp.zeros( + (self.elevation_map.shape[0], self.cell_n, self.cell_n), + dtype=self.data_type, + ) + self.traversability_input = cp.zeros((self.cell_n, self.cell_n), dtype=self.data_type) + self.traversability_mask_dummy = cp.zeros((self.cell_n, self.cell_n), dtype=self.data_type) + self.min_filtered = cp.zeros((self.cell_n, self.cell_n), dtype=self.data_type) + self.min_filtered_mask = cp.zeros((self.cell_n, self.cell_n), dtype=self.data_type) + self.mask = cp.zeros((self.cell_n, self.cell_n), dtype=self.data_type) + + self.add_points_kernel = add_points_kernel( + self.resolution, + self.cell_n, + self.cell_n, + self.param.sensor_noise_factor, + self.param.mahalanobis_thresh, + self.param.outlier_variance, + self.param.wall_num_thresh, + self.param.max_ray_length, + self.param.cleanup_step, + self.param.min_valid_distance, + self.param.max_height_range, + self.param.cleanup_cos_thresh, + self.param.ramped_height_range_a, + self.param.ramped_height_range_b, + self.param.ramped_height_range_c, + self.param.enable_edge_sharpen, + self.param.enable_visibility_cleanup, + ) + self.error_counting_kernel = error_counting_kernel( + self.resolution, + self.cell_n, + self.cell_n, + self.param.sensor_noise_factor, + self.param.mahalanobis_thresh, + self.param.drift_compensation_variance_inlier, + self.param.traversability_inlier, + self.param.min_valid_distance, + self.param.max_height_range, + self.param.ramped_height_range_a, + self.param.ramped_height_range_b, + self.param.ramped_height_range_c, + ) + self.average_map_kernel = average_map_kernel( + self.cell_n, self.cell_n, self.param.max_variance, self.initial_variance + ) + + self.dilation_filter_kernel = dilation_filter_kernel(self.cell_n, self.cell_n, self.param.dilation_size) + self.dilation_filter_kernel_initializer = dilation_filter_kernel( + self.cell_n, self.cell_n, self.param.dilation_size_initialize + ) + self.polygon_mask_kernel = polygon_mask_kernel(self.cell_n, self.cell_n, self.resolution) + self.normal_filter_kernel = normal_filter_kernel(self.cell_n, self.cell_n, self.resolution) + + def compile_image_kernels(self): + """Compile kernels related to processing image messages.""" + + for config in self.param.subscriber_cfg.values(): + if config["data_type"] == "image": + self.valid_correspondence = cp.asarray( + np.zeros((self.cell_n, self.cell_n), dtype=np.bool_), dtype=np.bool_ + ) + self.uv_correspondence = cp.asarray( + np.zeros((2, self.cell_n, self.cell_n), dtype=np.float32), + dtype=np.float32, + ) + # self.distance_correspondence = cp.asarray( + # np.zeros((self.cell_n, self.cell_n), dtype=np.float32), dtype=np.float32 + # ) + # TODO tolerance_z_collision add parameter + self.image_to_map_correspondence_kernel = image_to_map_correspondence_kernel( + resolution=self.resolution, + width=self.cell_n, + height=self.cell_n, + tolerance_z_collision=0.10, + ) + break + + def shift_translation_to_map_center(self, t): + """Deduct the map center to get the translation of a point w.r.t. the map center. + + Args: + t (cupy._core.core.ndarray): Absolute point position + """ + t -= self.center + + def update_map_with_kernel(self, points_all, channels, R, t, position_noise, orientation_noise): + """Update map with new measurement. 
+ + Args: + points_all (cupy._core.core.ndarray): + channels (List[str]): + R (cupy._core.core.ndarray): + t (cupy._core.core.ndarray): + position_noise (float): + orientation_noise (float): + """ + self.new_map *= 0.0 + error = cp.array([0.0], dtype=cp.float32) + error_cnt = cp.array([0], dtype=cp.float32) + points = points_all[:, :3] + + with self.map_lock: + self.shift_translation_to_map_center(t) + + # Log before kernel execution + + self.error_counting_kernel( + self.elevation_map, + points, + cp.array([0.0], dtype=self.data_type), + cp.array([0.0], dtype=self.data_type), + R, + t, + self.new_map, + error, + error_cnt, + size=(points.shape[0]), + ) + + if ( + self.param.enable_drift_compensation + and error_cnt > self.param.min_height_drift_cnt + and ( + position_noise > self.param.position_noise_thresh + or orientation_noise > self.param.orientation_noise_thresh + ) + ): + self.mean_error = error / error_cnt + self.additive_mean_error += self.mean_error + if np.abs(self.mean_error) < self.param.max_drift: + self.elevation_map[0] += self.mean_error * self.param.drift_compensation_alpha + + + self.add_points_kernel( + cp.array([0.0], dtype=self.data_type), + cp.array([0.0], dtype=self.data_type), + R, + t, + self.normal_map, + points, + self.elevation_map, + self.new_map, + size=(points.shape[0]), + ) + + # Log after adding points + + self.average_map_kernel(self.new_map, self.elevation_map, size=(self.cell_n * self.cell_n)) + + self.semantic_map.update_layers_pointcloud(points_all, channels, R, t, self.new_map) + + if self.param.enable_overlap_clearance: + self.clear_overlap_map(t) + + self.traversability_input *= 0.0 + self.dilation_filter_kernel( + self.elevation_map[5], + self.elevation_map[2] + self.elevation_map[6], + self.traversability_input, + self.traversability_mask_dummy, + size=(self.cell_n * self.cell_n), + ) + + traversability = self.traversability_filter(self.traversability_input) + self.elevation_map[3][3:-3, 3:-3] = traversability.reshape( + (traversability.shape[2], traversability.shape[3]) + ) + + # Log final state + self.update_normal(self.traversability_input) + + def clear_overlap_map(self, t): + """Clear overlapping areas around the map center. + + Args: + t (cupy._core.core.ndarray): Absolute point position + """ + + height_min = t[2] - self.param.overlap_clear_range_z + height_max = t[2] + self.param.overlap_clear_range_z + near_map = self.elevation_map[:, self.cell_min : self.cell_max, self.cell_min : self.cell_max] + valid_idx = ~cp.logical_or(near_map[0] < height_min, near_map[0] > height_max) + near_map[0] = cp.where(valid_idx, near_map[0], 0.0) + near_map[1] = cp.where(valid_idx, near_map[1], self.initial_variance) + near_map[2] = cp.where(valid_idx, near_map[2], 0.0) + valid_idx = ~cp.logical_or(near_map[5] < height_min, near_map[5] > height_max) + near_map[5] = cp.where(valid_idx, near_map[5], 0.0) + near_map[6] = cp.where(valid_idx, near_map[6], 0.0) + self.elevation_map[:, self.cell_min : self.cell_max, self.cell_min : self.cell_max] = near_map + + def get_additive_mean_error(self): + """Returns the additive mean error. 
+ + Returns: + + """ + return self.additive_mean_error + + def update_variance(self): + """Adds the time variance to the valid cells.""" + self.elevation_map[1] += self.param.time_variance * self.elevation_map[2] + + def update_time(self): + """Adds the time interval to the time layer.""" + self.elevation_map[4] += self.param.time_interval + + def update_upper_bound_with_valid_elevation(self): + """Filters all invalid cells' upper_bound and is_upper_bound layers.""" + mask = self.elevation_map[2] > 0.5 + self.elevation_map[5] = cp.where(mask, self.elevation_map[0], self.elevation_map[5]) + self.elevation_map[6] = cp.where(mask, 0.0, self.elevation_map[6]) + + def input_pointcloud( + self, + raw_points: cp._core.core.ndarray, + channels: List[str], + R: cp._core.core.ndarray, + t: cp._core.core.ndarray, + position_noise: float, + orientation_noise: float, + ): + """Input the point cloud and fuse the new measurements to update the elevation map. + + Args: + raw_points (cupy._core.core.ndarray): + channels (List[str]): + R (cupy._core.core.ndarray): + t (cupy._core.core.ndarray): + position_noise (float): + orientation_noise (float): + + Returns: + None: + """ + raw_points = cp.asarray(raw_points, dtype=self.data_type) + + # Basic sanity statistics of the raw points (currently unused) + min_points = cp.min(raw_points, axis=0) + max_points = cp.max(raw_points, axis=0) + mean_points = cp.mean(raw_points, axis=0) + + additional_channels = channels[3:] + raw_points = raw_points[~cp.isnan(raw_points[:, :3]).any(axis=1)] + self.update_map_with_kernel( + raw_points, + additional_channels, + cp.asarray(R, dtype=self.data_type), + cp.asarray(t, dtype=self.data_type), + position_noise, + orientation_noise, + ) + + def input_image( + self, + image: List[cp._core.core.ndarray], + channels: List[str], + # fusion_methods: List[str], + R: cp._core.core.ndarray, + t: cp._core.core.ndarray, + K: cp._core.core.ndarray, + D: cp._core.core.ndarray, + distortion_model: str, + image_height: int, + image_width: int, + ): + """Input image and fuse the new measurements to update the elevation map. + + Args: + image (List[cupy._core.core.ndarray]): List of arrays containing the individual image input channels + channels (List[str]): List of channel names corresponding to the image input channels + R (cupy._core.core.ndarray): Camera optical center rotation + t (cupy._core.core.ndarray): Camera optical center translation + K (cupy._core.core.ndarray): Camera intrinsics + D (cupy._core.core.ndarray): Camera distortion coefficients + distortion_model (str): Distortion model of the camera + image_height (int): Image height + image_width (int): Image width + + Returns: + None: + """ + + image = np.stack(image, axis=0) + if len(image.shape) == 2: + image = image[None] + + # Convert to cupy + image = cp.asarray(image, dtype=self.data_type) + K = cp.asarray(K, dtype=self.data_type) + R = cp.asarray(R, dtype=self.data_type) + t = cp.asarray(t, dtype=self.data_type) + D = cp.asarray(D, dtype=self.data_type) + image_height = cp.float32(image_height) + image_width = cp.float32(image_width) + + if len(D) < 4: + D = cp.zeros(5, dtype=self.data_type) + elif len(D) == 4: + D = cp.concatenate([D, cp.zeros(1, dtype=self.data_type)]) + else: + D = D[:5] + + if distortion_model == "radtan": + pass + elif distortion_model == "equidistant": + # Not implemented yet. + D *= 0 + elif distortion_model == "plumb_bob": + # Not implemented yet. + D *= 0 + else: + # Not implemented yet. 
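+ # Note: of the models above, only "radtan" is currently honored; the "equidistant" and "plumb_bob" branches, like this fallback, zero out D, i.e. the image is treated as undistorted.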
+            D *= 0
+
+        # Calculate transformation matrix
+        P = cp.asarray(K @ cp.concatenate([R, t[:, None]], 1), dtype=np.float32)
+        t_cam_map = -R.T @ t - self.center
+        t_cam_map = t_cam_map.get()
+        x1 = cp.uint32((self.cell_n / 2) + ((t_cam_map[0]) / self.resolution))
+        y1 = cp.uint32((self.cell_n / 2) + ((t_cam_map[1]) / self.resolution))
+        z1 = cp.float32(t_cam_map[2])
+
+        self.uv_correspondence *= 0
+        self.valid_correspondence[:, :] = False
+
+        with self.map_lock:
+            self.image_to_map_correspondence_kernel(
+                self.elevation_map,
+                x1,
+                y1,
+                z1,
+                P.reshape(-1),
+                K.reshape(-1),
+                D.reshape(-1),
+                image_height,
+                image_width,
+                self.center,
+                self.uv_correspondence,
+                self.valid_correspondence,
+                size=int(self.cell_n * self.cell_n),
+            )
+            self.semantic_map.update_layers_image(
+                image,
+                channels,
+                self.uv_correspondence,
+                self.valid_correspondence,
+                image_height,
+                image_width,
+            )
+
+    def update_normal(self, dilated_map):
+        """Clear the normal map and then apply the normal kernel with dilated map as input.
+
+        Args:
+            dilated_map (cupy._core.core.ndarray): Dilated elevation layer used as the kernel input.
+        """
+        with self.map_lock:
+            self.normal_map *= 0.0
+            self.normal_filter_kernel(
+                dilated_map,
+                self.elevation_map[2],
+                self.normal_map,
+                size=(self.cell_n * self.cell_n),
+            )
+
+    def process_map_for_publish(self, input_map, fill_nan=False, add_z=False, xp=cp):
+        """Process the input_map according to the fill_nan and add_z flags.
+
+        Args:
+            input_map (cupy._core.core.ndarray): Layer to process.
+            fill_nan (bool): Set invalid cells to NaN.
+            add_z (bool): Add the map-center height to the layer.
+            xp (module): Array module to use (cp or np).
+
+        Returns:
+            cupy._core.core.ndarray: Processed layer with the one-cell border cropped.
+        """
+        m = input_map.copy()
+        if fill_nan:
+            m = xp.where(self.elevation_map[2] > 0.5, m, xp.nan)
+        if add_z:
+            m = m + self.center[2]
+        return m[1:-1, 1:-1]
+
+    def get_elevation(self):
+        """Get the elevation layer.
+
+        Returns:
+            elevation layer
+        """
+        return self.process_map_for_publish(self.elevation_map[0], fill_nan=True, add_z=True)
+
+    def get_variance(self):
+        """Get the variance layer.
+
+        Returns:
+            variance layer
+        """
+        return self.process_map_for_publish(self.elevation_map[1], fill_nan=False, add_z=False)
+
+    def get_traversability(self):
+        """Get the traversability layer.
+
+        Returns:
+            traversability layer
+        """
+        traversability = cp.where(
+            (self.elevation_map[2] + self.elevation_map[6]) > 0.5,
+            self.elevation_map[3].copy(),
+            cp.nan,
+        )
+        self.traversability_buffer[3:-3, 3:-3] = traversability[3:-3, 3:-3]
+        traversability = self.traversability_buffer[1:-1, 1:-1]
+        return traversability
+
+    def get_time(self):
+        """Get the time layer.
+
+        Returns:
+            time layer
+        """
+        return self.process_map_for_publish(self.elevation_map[4], fill_nan=False, add_z=False)
+
+    def get_upper_bound(self):
+        """Get the upper bound layer.
+
+        Returns:
+            upper_bound: upper bound layer
+        """
+        if self.param.use_only_above_for_upper_bound:
+            valid = cp.logical_or(
+                cp.logical_and(self.elevation_map[5] > 0.0, self.elevation_map[6] > 0.5),
+                self.elevation_map[2] > 0.5,
+            )
+        else:
+            valid = cp.logical_or(self.elevation_map[2] > 0.5, self.elevation_map[6] > 0.5)
+        upper_bound = cp.where(valid, self.elevation_map[5].copy(), cp.nan)
+        upper_bound = upper_bound[1:-1, 1:-1] + self.center[2]
+        return upper_bound
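The validity rule in `get_upper_bound` (shown here for the branch where `use_only_above_for_upper_bound` is disabled) is a plain mask over the layer stack: a cell publishes its upper bound if it has fused measurements or a ray-cast upper bound. A toy NumPy version with made-up layer values:

```python
import numpy as np

is_valid = np.array([[1.0, 0.0], [0.0, 0.0]])   # layer 2: cells with fused measurements
is_upper = np.array([[0.0, 1.0], [0.0, 1.0]])   # layer 6: cells with a ray-cast upper bound
upper    = np.array([[0.3, 0.8], [0.2, -0.1]])  # layer 5: upper-bound heights

mask = np.logical_or(is_valid > 0.5, is_upper > 0.5)
published = np.where(mask, upper, np.nan)
print(published)  # cells with neither kind of evidence come out as NaN
```

+
+    def get_is_upper_bound(self):
+        """Get the is upper bound layer.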
+
+        Returns:
+            is_upper_bound: is_upper_bound layer
+        """
+        if self.param.use_only_above_for_upper_bound:
+            valid = cp.logical_or(
+                cp.logical_and(self.elevation_map[5] > 0.0, self.elevation_map[6] > 0.5),
+                self.elevation_map[2] > 0.5,
+            )
+        else:
+            valid = cp.logical_or(self.elevation_map[2] > 0.5, self.elevation_map[6] > 0.5)
+        is_upper_bound = cp.where(valid, self.elevation_map[6].copy(), cp.nan)
+        is_upper_bound = is_upper_bound[1:-1, 1:-1]
+        return is_upper_bound
+
+    def xp_of_array(self, array):
+        """Return the array module (cupy or numpy) that matches the input array.
+
+        Args:
+            array (cupy._core.core.ndarray): Array to inspect.
+
+        Returns:
+            module: either np or cp
+        """
+        if type(array) == cp.ndarray:
+            return cp
+        elif type(array) == np.ndarray:
+            return np
+        raise TypeError("Unsupported array type {}".format(type(array)))
+
+    def copy_to_cpu(self, array, data, stream=None):
+        """Cast the array to float32 and, if it lives on the GPU, copy it into the CPU buffer.
+
+        Args:
+            array (cupy._core.core.ndarray): Source array (GPU or CPU).
+            data (numpy.ndarray): Preallocated CPU buffer that is filled in place.
+            stream (cupy.cuda.stream.Stream, optional): Stream used for the device-to-host copy.
+        """
+        if type(array) == np.ndarray:
+            data[...] = array.astype(np.float32)
+        elif type(array) == cp.ndarray:
+            if stream is not None:
+                data[...] = cp.asnumpy(array.astype(np.float32), stream=stream)
+            else:
+                data[...] = cp.asnumpy(array.astype(np.float32))
+
+    def exists_layer(self, name):
+        """Check if the layer exists in the elevation map, the semantic map, or a plugin.
+
+        Args:
+            name (str): Layer name
+
+        Returns:
+            bool: Indicates if layer exists.
+        """
+        if name in self.layer_names:
+            return True
+        elif name in self.semantic_map.layer_names:
+            return True
+        elif name in self.plugin_manager.layer_names:
+            return True
+        else:
+            return False
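The `copy_to_cpu` contract is that the caller owns a preallocated CPU buffer which is filled in place, optionally on a CUDA stream. A minimal sketch of the GPU path, with an arbitrary grid size:

```python
import numpy as np
import cupy as cp

cell_n = 202  # arbitrary grid size for the sketch
buffer = np.zeros((cell_n, cell_n), dtype=np.float32)

layer = cp.random.rand(cell_n, cell_n)  # stand-in for one map layer on the GPU
stream = cp.cuda.Stream(non_blocking=False)
# This mirrors what copy_to_cpu() does for GPU-resident input.
buffer[...] = cp.asnumpy(layer.astype(np.float32), stream=stream)
```

+
+    def get_map_with_name_ref(self, name, data):
+        """Copy the layer selected by name into the preallocated data buffer.
+
+        Args:
+            name (str): Name of the layer.
+            data (numpy.ndarray): Data structure that contains layer.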
+
+        """
+        use_stream = True
+        xp = cp
+        with self.map_lock:
+            if name == "elevation":
+                m = self.get_elevation()
+                use_stream = False
+            elif name == "variance":
+                m = self.get_variance()
+            elif name == "traversability":
+                m = self.get_traversability()
+            elif name == "time":
+                m = self.get_time()
+            elif name == "upper_bound":
+                m = self.get_upper_bound()
+            elif name == "is_upper_bound":
+                m = self.get_is_upper_bound()
+            elif name == "normal_x":
+                m = self.normal_map.copy()[0, 1:-1, 1:-1]
+            elif name == "normal_y":
+                m = self.normal_map.copy()[1, 1:-1, 1:-1]
+            elif name == "normal_z":
+                m = self.normal_map.copy()[2, 1:-1, 1:-1]
+            elif name in self.semantic_map.layer_names:
+                m = self.semantic_map.get_map_with_name(name)
+            elif name in self.plugin_manager.layer_names:
+                self.plugin_manager.update_with_name(
+                    name,
+                    self.elevation_map,
+                    self.layer_names,
+                    self.semantic_map.semantic_map,
+                    self.semantic_map.layer_names,
+                    self.base_rotation,
+                    self.semantic_map.elements_to_shift,
+                )
+                m = self.plugin_manager.get_map_with_name(name)
+                p = self.plugin_manager.get_param_with_name(name)
+                xp = self.xp_of_array(m)
+                m = self.process_map_for_publish(m, fill_nan=p.fill_nan, add_z=p.is_height_layer, xp=xp)
+            elif name == "rgb":
+                # Special handling for the rgb layer: check if any semantic layer uses color fusion
+                color_layer = None
+                for layer_name in self.semantic_map.layer_names:
+                    if (
+                        layer_name in self.semantic_map.layer_specs_points
+                        and self.semantic_map.layer_specs_points[layer_name] == "color"
+                    ):
+                        color_layer = layer_name
+                        break
+                    elif (
+                        layer_name in self.semantic_map.layer_specs_image
+                        and self.semantic_map.layer_specs_image[layer_name] == "color"
+                    ):
+                        color_layer = layer_name
+                        break
+
+                if color_layer:
+                    # Get the RGB data from the color layer
+                    m = self.semantic_map.get_rgb(color_layer)
+                else:
+                    # No RGB data available, return silently
+                    return
+            else:
+                print("Layer {} is not in the map".format(name))
+                return
+            # Need 180 degree rotation to match coordinate system
+            m = xp.flip(m, 0)
+            m = xp.flip(m, 1)
+            if use_stream:
+                stream = cp.cuda.Stream(non_blocking=False)
+            else:
+                stream = None
+            self.copy_to_cpu(m, data, stream=stream)
+
+    def get_normal_maps(self):
+        """Get the normal maps.
+
+        Returns:
+            maps: the three normal values for each cell
+        """
+        normal = self.normal_map.copy()
+        normal_x = normal[0, 1:-1, 1:-1]
+        normal_y = normal[1, 1:-1, 1:-1]
+        normal_z = normal[2, 1:-1, 1:-1]
+        maps = xp.stack([normal_x, normal_y, normal_z], axis=0)
+        maps = xp.flip(maps, 1)
+        maps = xp.flip(maps, 2)
+        maps = xp.asnumpy(maps)
+        return maps
+
+    def get_normal_ref(self, normal_x_data, normal_y_data, normal_z_data):
+        """Copy the normal maps into the provided buffers.
+
+        Args:
+            normal_x_data (numpy.ndarray): Output buffer for the x component.
+            normal_y_data (numpy.ndarray): Output buffer for the y component.
+            normal_z_data (numpy.ndarray): Output buffer for the z component.
+        """
+        maps = self.get_normal_maps()
+        self.stream = cp.cuda.Stream(non_blocking=True)
+        normal_x_data[...] = xp.asnumpy(maps[0], stream=self.stream)
+        normal_y_data[...] = xp.asnumpy(maps[1], stream=self.stream)
+        normal_z_data[...] = xp.asnumpy(maps[2], stream=self.stream)
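The `rgb` branch above, like the color fusion kernels later in this diff, stores a color as three 8-bit channels packed into a uint32 whose bits are reinterpreted as a float32 (the grid_map convention). A small round-trip sketch of that packing:

```python
import numpy as np

def pack_rgb(r, g, b):
    rgb = np.array((r << 16) | (g << 8) | b, dtype=np.uint32)
    return rgb.view(np.float32)  # bit-cast, not a numeric conversion

def unpack_rgb(value):
    bits = np.array(value, dtype=np.float32).view(np.uint32)
    return int(bits >> 16) & 0xFF, int(bits >> 8) & 0xFF, int(bits) & 0xFF

color = pack_rgb(200, 100, 50)
print(unpack_rgb(color))  # -> (200, 100, 50)
```

+
+    def get_layer(self, name):
+        """Return the layer with the name input.
+
+        Args:
+            name: The layer's name.
+
+        Returns:
+            return_map: The requested layer.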
+
+        """
+        if name in self.layer_names:
+            idx = self.layer_names.index(name)
+            return_map = self.elevation_map[idx]
+        elif name in self.semantic_map.layer_names:
+            idx = self.semantic_map.layer_names.index(name)
+            return_map = self.semantic_map.semantic_map[idx]
+        elif name in self.plugin_manager.layer_names:
+            self.plugin_manager.update_with_name(
+                name,
+                self.elevation_map,
+                self.layer_names,
+                self.semantic_map,
+                self.base_rotation,
+            )
+            return_map = self.plugin_manager.get_map_with_name(name)
+        else:
+            print("Layer {} is not in the map, returning None!".format(name))
+            return
+        return return_map
+
+    def get_polygon_traversability(self, polygon, result):
+        """Check if the input polygon is traversable.
+
+        Args:
+            polygon (cupy._core.core.ndarray): (N, 2) polygon vertices in the map frame.
+            result (numpy.ndarray): Output buffer filled with [is_safe, traversability, area].
+
+        Returns:
+            int: Number of vertices of the untraversable sub-polygon.
+        """
+        polygon = xp.asarray(polygon)
+        area = calculate_area(polygon)
+        polygon = polygon.astype(self.data_type)
+        pmin = self.center[:2] - self.map_length / 2 + self.resolution
+        pmax = self.center[:2] + self.map_length / 2 - self.resolution
+        polygon[:, 0] = polygon[:, 0].clip(pmin[0], pmax[0])
+        polygon[:, 1] = polygon[:, 1].clip(pmin[1], pmax[1])
+        polygon_min = polygon.min(axis=0)
+        polygon_max = polygon.max(axis=0)
+        polygon_bbox = cp.concatenate([polygon_min, polygon_max]).flatten()
+        polygon_n = xp.array(polygon.shape[0], dtype=np.int16)
+        clipped_area = calculate_area(polygon)
+        self.polygon_mask_kernel(
+            polygon,
+            self.center[0],
+            self.center[1],
+            polygon_n,
+            polygon_bbox,
+            self.mask,
+            size=(self.cell_n * self.cell_n),
+        )
+        tmp_map = self.get_layer(self.param.checker_layer)
+        masked, masked_isvalid = get_masked_traversability(self.elevation_map, self.mask, tmp_map)
+        if masked_isvalid.sum() > 0:
+            t = masked.sum() / masked_isvalid.sum()
+        else:
+            t = cp.asarray(0.0, dtype=self.data_type)
+        is_safe, un_polygon = is_traversable(
+            masked,
+            self.param.safe_thresh,
+            self.param.safe_min_thresh,
+            self.param.max_unsafe_n,
+        )
+        untraversable_polygon_num = 0
+        if un_polygon is not None:
+            un_polygon = transform_to_map_position(un_polygon, self.center[:2], self.cell_n, self.resolution)
+            untraversable_polygon_num = un_polygon.shape[0]
+        if clipped_area < 0.001:
+            is_safe = False
+            print("requested polygon is outside of the map")
+        result[...] = np.array([is_safe, t.get(), area.get()])
+        self.untraversable_polygon = un_polygon
+        return untraversable_polygon_num
+
+    def get_untraversable_polygon(self, untraversable_polygon):
+        """Copy the untraversable polygon into the provided buffer.
+
+        Args:
+            untraversable_polygon (numpy.ndarray): Output buffer.
+        """
+        untraversable_polygon[...] = xp.asnumpy(self.untraversable_polygon)
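`calculate_area` comes from the package's map utilities; for reference, the polygon area it reports corresponds to the standard shoelace formula, sketched here as an equivalent standalone function:

```python
import numpy as np

def shoelace_area(polygon):
    # polygon: (N, 2) array of vertices in order.
    x, y = polygon[:, 0], polygon[:, 1]
    return 0.5 * abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))

triangle = np.array([[0.0, 0.0], [2.0, 0.0], [0.0, 2.0]])
print(shoelace_area(triangle))  # 2.0, matching the test polygon at the bottom of this file
```

+
+    def initialize_map(self, points, method="cubic"):
+        """Initialize the map from the given points, interpolating with the chosen method.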
+
+        Args:
+            points (numpy.ndarray): (N, 3) array of x, y, z samples used to seed the map.
+            method (str): Interpolation method ['linear', 'cubic', 'nearest']
+        """
+        self.clear()
+        with self.map_lock:
+            points = cp.asarray(points, dtype=self.data_type)
+            indices = transform_to_map_index(points[:, :2], self.center[:2], self.cell_n, self.resolution)
+            points[:, :2] = indices.astype(points.dtype)
+            points[:, 2] -= self.center[2]
+            self.map_initializer(self.elevation_map, points, method)
+            if self.param.dilation_size_initialize > 0:
+                for _ in range(2):
+                    self.dilation_filter_kernel_initializer(
+                        self.elevation_map[0],
+                        self.elevation_map[2],
+                        self.elevation_map[0],
+                        self.elevation_map[2],
+                        size=(self.cell_n * self.cell_n),
+                    )
+            self.update_upper_bound_with_valid_elevation()
+
+
+if __name__ == "__main__":
+    # Test script for profiling.
+    # $ python -m cProfile -o profile.stats elevation_mapping.py
+    # $ snakeviz profile.stats
+    xp.random.seed(123)
+    R = xp.random.rand(3, 3)
+    t = xp.random.rand(3)
+    print(R, t)
+    param = Parameter(
+        use_chainer=False,
+        weight_file="../config/weights.dat",
+        plugin_config_file="../config/plugin_config.yaml",
+    )
+    param.additional_layers = ["rgb", "grass", "tree", "people"]
+    param.fusion_algorithms = ["color", "class_bayesian", "class_bayesian", "class_bayesian"]
+    param.update()
+    elevation = ElevationMap(param)
+    layers = [
+        "elevation",
+        "variance",
+        "traversability",
+        "min_filter",
+        "smooth",
+        "inpaint",
+        "rgb",
+    ]
+    points = xp.random.rand(100000, len(layers))
+
+    channels = ["x", "y", "z"] + param.additional_layers
+    print(channels)
+    data = np.zeros((elevation.cell_n - 2, elevation.cell_n - 2), dtype=np.float32)
+    for i in range(50):
+        elevation.input_pointcloud(points, channels, R, t, 0, 0)
+        elevation.update_normal(elevation.elevation_map[0])
+        pos = np.array([i * 0.01, i * 0.02, i * 0.01])
+        elevation.move_to(pos, R)
+        for layer in layers:
+            elevation.get_map_with_name_ref(layer, data)
+        print(i)
+    polygon = cp.array([[0, 0], [2, 0], [0, 2]], dtype=param.data_type)
+    # Use a float buffer: the result holds [is_safe, traversability, area].
+    result = np.array([0.0, 0.0, 0.0])
+    elevation.get_polygon_traversability(polygon, result)
+    print(result)
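As a usage sketch building on the profiling script above (so `elevation` refers to its ElevationMap instance), `initialize_map` can seed the terrain from a handful of known ground points before any sensor data arrives:

```python
import numpy as np

# Hedged sketch: assumes `elevation` was constructed as in the script above.
ground_points = np.array(
    [
        [-1.0, -1.0, 0.0],
        [1.0, -1.0, 0.0],
        [-1.0, 1.0, 0.1],
        [1.0, 1.0, 0.1],
    ],
    dtype=np.float32,
)
elevation.initialize_map(ground_points, method="cubic")  # interpolates between the seeds
```

diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/elevation_mapping_node.py b/elevation_mapping_cupy/elevation_mapping_cupy/elevation_mapping_node.py
new file mode 120000
index 00000000..4740ecba
--- /dev/null
+++ b/elevation_mapping_cupy/elevation_mapping_cupy/elevation_mapping_node.py
@@ -0,0 +1 @@
+../scripts/elevation_mapping_node.py
\ No newline at end of file
diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/fusion/__init__.py b/elevation_mapping_cupy/elevation_mapping_cupy/fusion/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/fusion/fusion_manager.py b/elevation_mapping_cupy/elevation_mapping_cupy/fusion/fusion_manager.py
new file mode 100644
index 00000000..aca34a6d
--- /dev/null
+++ b/elevation_mapping_cupy/elevation_mapping_cupy/fusion/fusion_manager.py
@@ -0,0 +1,112 @@
+#
+# Copyright (c) 2023, Takahiro Miki. All rights reserved.
+# Licensed under the MIT license. See LICENSE file in the project root for details.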
+#
+from abc import ABC, abstractmethod
+import cupy as cp
+from typing import List, Dict, Any
+from dataclasses import dataclass
+import importlib
+import inspect
+
+
+# @dataclass
+# class FusionParams:
+#     name: str
+
+
+class FusionBase(ABC):
+    """
+    Base class that all fusion plugins derive from.
+    """
+
+    @abstractmethod
+    def __init__(self, *args, **kwargs):
+        """
+
+        Args:
+            params: Parameter object carrying the map configuration (cell_n, resolution, ...).
+        """
+        self.name = None
+
+    @abstractmethod
+    def __call__(self, points_all, R, t, pcl_ids, layer_ids, elevation_map, semantic_map, new_map):
+        pass
+
+
+class FusionManager(object):
+    def __init__(self, params):
+        self.fusion_plugins: Dict[str, FusionBase] = {}
+        self.params = params
+        self.plugins = []
+
+    def register_plugin(self, plugin):
+        """
+        Register a new fusion plugin by module name.
+        """
+        try:
+            m = importlib.import_module("." + plugin, package="elevation_mapping_cupy.fusion")
+        except ImportError as e:
+            raise ValueError("Plugin {} does not exist.".format(plugin)) from e
+        for name, obj in inspect.getmembers(m):
+            if inspect.isclass(obj) and issubclass(obj, FusionBase) and name != "FusionBase":
+                self.plugins.append(obj(self.params))
+
+    def get_plugin_idx(self, name: str, data_type: str):
+        """
+        Return the index of a registered fusion plugin, or None if it is not registered.
+        """
+        name = data_type + "_" + name
+        for idx, plugin in enumerate(self.plugins):
+            if plugin.name == name:
+                return idx
+        print("[WARNING] Plugin {} is not in the list: {}".format(name, self.plugins))
+        return None
+
+    def execute_plugin(
+        self, name: str, points_all, R, t, pcl_ids, layer_ids, elevation_map, semantic_map, new_map, elements_to_shift
+    ):
+        """
+        Execute a registered point cloud fusion plugin.
+        """
+        idx = self.get_plugin_idx(name, "pointcloud")
+        if idx is not None:
+            self.plugins[idx](
+                points_all, R, t, pcl_ids, layer_ids, elevation_map, semantic_map, new_map, elements_to_shift
+            )
+        # else:
+        #     raise ValueError("Plugin {} is not registered".format(name))
+
+    def execute_image_plugin(
+        self,
+        name: str,
+        sem_map_idx,
+        image,
+        j,
+        uv_correspondence,
+        valid_correspondence,
+        image_height,
+        image_width,
+        semantic_map,
+        new_map,
+    ):
+        """
+        Execute a registered image fusion plugin.
+        """
+        idx = self.get_plugin_idx(name, "image")
+        if idx is not None:
+            self.plugins[idx](
+                sem_map_idx,
+                image,
+                j,
+                uv_correspondence,
+                valid_correspondence,
+                image_height,
+                image_width,
+                semantic_map,
+                new_map,
+            )
+        # else:
+        #     raise ValueError("Plugin {} is not registered".format(name))
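A hedged sketch of how a plugin is registered and looked up through this manager; `params` stands in for the package's Parameter object, which must provide the fields the plugins read (cell_n, resolution, and so on):

```python
from elevation_mapping_cupy.fusion.fusion_manager import FusionManager

manager = FusionManager(params)
manager.register_plugin("pointcloud_average")  # imports fusion/pointcloud_average.py
# Lookup names are "<data_type>_<name>", so this resolves "pointcloud_average".
idx = manager.get_plugin_idx("average", "pointcloud")
```

diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/fusion/image_color.py b/elevation_mapping_cupy/elevation_mapping_cupy/fusion/image_color.py
new file mode 100644
index 00000000..c882c1b0
--- /dev/null
+++ b/elevation_mapping_cupy/elevation_mapping_cupy/fusion/image_color.py
@@ -0,0 +1,86 @@
+#
+# Copyright (c) 2023, Takahiro Miki. All rights reserved.
+# Licensed under the MIT license. See LICENSE file in the project root for details.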
+# +import cupy as cp +import numpy as np +import string + +from .fusion_manager import FusionBase + + +def color_correspondences_to_map_kernel(resolution, width, height): + color_correspondences_to_map_kernel = cp.ElementwiseKernel( + in_params="raw U sem_map, raw U map_idx, raw U image_rgb, raw U uv_correspondence, raw B valid_correspondence, raw U image_height, raw U image_width", + out_params="raw U new_sem_map", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + """ + ).substitute(width=width, height=height), + operation=string.Template( + """ + int cell_idx = get_map_idx(i, 0); + if (valid_correspondence[cell_idx]){ + int cell_idx_2 = get_map_idx(i, 1); + + int idx_red = int(uv_correspondence[cell_idx]) + int(uv_correspondence[cell_idx_2]) * image_width; + int idx_green = image_width * image_height + idx_red; + int idx_blue = image_width * image_height * 2 + idx_red; + + unsigned int r = image_rgb[idx_red]; + unsigned int g = image_rgb[idx_green]; + unsigned int b = image_rgb[idx_blue]; + + unsigned int rgb = (r<<16) + (g << 8) + b; + float rgb_ = __uint_as_float(rgb); + new_sem_map[get_map_idx(i, map_idx)] = rgb_; + }else{ + new_sem_map[get_map_idx(i, map_idx)] = sem_map[get_map_idx(i, map_idx)]; + } + """ + ).substitute(), + name="color_correspondences_to_map_kernel", + ) + return color_correspondences_to_map_kernel + + +class ImageColor(FusionBase): + def __init__(self, params, *args, **kwargs): + # super().__init__(fusion_params, *args, **kwargs) + # print("Initialize fusion kernel") + self.name = "image_color" + self.cell_n = params.cell_n + self.resolution = params.resolution + + self.color_correspondences_to_map_kernel = color_correspondences_to_map_kernel( + resolution=self.resolution, width=self.cell_n, height=self.cell_n, + ) + + def __call__( + self, + sem_map_idx, + image, + j, + uv_correspondence, + valid_correspondence, + image_height, + image_width, + semantic_map, + new_map, + ): + self.color_correspondences_to_map_kernel( + semantic_map, + cp.uint64(sem_map_idx), + image, + uv_correspondence, + valid_correspondence, + image_height, + image_width, + new_map, + size=int(self.cell_n * self.cell_n), + ) + semantic_map[sem_map_idx] = new_map[sem_map_idx] diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/fusion/image_exponential.py b/elevation_mapping_cupy/elevation_mapping_cupy/fusion/image_exponential.py new file mode 100644 index 00000000..26aa2f89 --- /dev/null +++ b/elevation_mapping_cupy/elevation_mapping_cupy/fusion/image_exponential.py @@ -0,0 +1,77 @@ +# +# Copyright (c) 2023, Takahiro Miki. All rights reserved. +# Licensed under the MIT license. See LICENSE file in the project root for details. 
+# +import cupy as cp +import numpy as np +import string + +from .fusion_manager import FusionBase + + +def exponential_correspondences_to_map_kernel(resolution, width, height, alpha): + exponential_correspondences_to_map_kernel = cp.ElementwiseKernel( + in_params="raw U sem_map, raw U map_idx, raw U image_mono, raw U uv_correspondence, raw B valid_correspondence, raw U image_height, raw U image_width", + out_params="raw U new_sem_map", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + """ + ).substitute(width=width, height=height), + operation=string.Template( + """ + int cell_idx = get_map_idx(i, 0); + if (valid_correspondence[cell_idx]){ + int cell_idx_2 = get_map_idx(i, 1); + int idx = int(uv_correspondence[cell_idx]) + int(uv_correspondence[cell_idx_2]) * image_width; + new_sem_map[get_map_idx(i, map_idx)] = sem_map[get_map_idx(i, map_idx)] * (1-${alpha}) + ${alpha} * image_mono[idx]; + }else{ + new_sem_map[get_map_idx(i, map_idx)] = sem_map[get_map_idx(i, map_idx)]; + } + + """ + ).substitute(alpha=alpha), + name="exponential_correspondences_to_map_kernel", + ) + return exponential_correspondences_to_map_kernel + + +class ImageExponential(FusionBase): + def __init__(self, params, *args, **kwargs): + # super().__init__(fusion_params, *args, **kwargs) + # print("Initialize fusion kernel") + self.name = "image_exponential" + self.cell_n = params.cell_n + self.resolution = params.resolution + + self.exponential_correspondences_to_map_kernel = exponential_correspondences_to_map_kernel( + resolution=self.resolution, width=self.cell_n, height=self.cell_n, alpha=0.7, + ) + + def __call__( + self, + sem_map_idx, + image, + j, + uv_correspondence, + valid_correspondence, + image_height, + image_width, + semantic_map, + new_map, + ): + self.exponential_correspondences_to_map_kernel( + semantic_map, + sem_map_idx, + image[j], + uv_correspondence, + valid_correspondence, + image_height, + image_width, + new_map, + size=int(self.cell_n * self.cell_n), + ) + semantic_map[sem_map_idx] = new_map[sem_map_idx] diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/fusion/image_latest.py b/elevation_mapping_cupy/elevation_mapping_cupy/fusion/image_latest.py new file mode 100644 index 00000000..e8e7eb14 --- /dev/null +++ b/elevation_mapping_cupy/elevation_mapping_cupy/fusion/image_latest.py @@ -0,0 +1,77 @@ +# +# Copyright (c) 2023, Takahiro Miki. All rights reserved. +# Licensed under the MIT license. See LICENSE file in the project root for details. 
+# +import cupy as cp +import numpy as np +import string + +from .fusion_manager import FusionBase + + +def latest_correspondences_to_map_kernel(resolution, width, height, alpha): + latest_correspondences_to_map_kernel = cp.ElementwiseKernel( + in_params="raw U sem_map, raw U map_idx, raw U image_mono, raw U uv_correspondence, raw B valid_correspondence, raw U image_height, raw U image_width", + out_params="raw U new_sem_map", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + """ + ).substitute(width=width, height=height), + operation=string.Template( + """ + int cell_idx = get_map_idx(i, 0); + if (valid_correspondence[cell_idx]){ + int cell_idx_2 = get_map_idx(i, 1); + int idx = int(uv_correspondence[cell_idx]) + int(uv_correspondence[cell_idx_2]) * image_width; + new_sem_map[get_map_idx(i, map_idx)] = image_mono[idx]; + }else{ + new_sem_map[get_map_idx(i, map_idx)] = sem_map[get_map_idx(i, map_idx)]; + } + + """ + ).substitute(alpha=alpha), + name="latest_correspondences_to_map_kernel", + ) + return latest_correspondences_to_map_kernel + + +class ImageLatest(FusionBase): + def __init__(self, params, *args, **kwargs): + # super().__init__(fusion_params, *args, **kwargs) + # print("Initialize fusion kernel") + self.name = "image_latest" + self.cell_n = params.cell_n + self.resolution = params.resolution + + self.latest_correspondences_to_map_kernel = latest_correspondences_to_map_kernel( + resolution=self.resolution, width=self.cell_n, height=self.cell_n, alpha=0.7, + ) + + def __call__( + self, + sem_map_idx, + image, + j, + uv_correspondence, + valid_correspondence, + image_height, + image_width, + semantic_map, + new_map, + ): + self.latest_correspondences_to_map_kernel( + semantic_map, + sem_map_idx, + image[j], + uv_correspondence, + valid_correspondence, + image_height, + image_width, + new_map, + size=int(self.cell_n * self.cell_n), + ) + semantic_map[sem_map_idx] = new_map[sem_map_idx] diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/fusion/pointcloud_average.py b/elevation_mapping_cupy/elevation_mapping_cupy/fusion/pointcloud_average.py new file mode 100644 index 00000000..c4165a54 --- /dev/null +++ b/elevation_mapping_cupy/elevation_mapping_cupy/fusion/pointcloud_average.py @@ -0,0 +1,113 @@ +import cupy as cp +import numpy as np +import string + +from .fusion_manager import FusionBase + + +def sum_kernel( + resolution, width, height, +): + """Sums the semantic values of the classes for the exponentiala verage or for the average. 
+ + Args: + resolution: + width: + height: + + Returns: + + """ + # input the list of layers, amount of channels can slo be input through kernel + sum_kernel = cp.ElementwiseKernel( + in_params="raw U p, raw U R, raw U t, raw W pcl_chan, raw W map_lay, raw W pcl_channels", + out_params="raw U map, raw U newmap", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + """ + ).substitute(resolution=resolution, width=width, height=height), + operation=string.Template( + """ + U id = floorf(i/pcl_channels[1]); + int layer = i % pcl_channels[1]; + U idx = p[id * pcl_channels[0]]; + U valid = p[id * pcl_channels[0] + 1]; + U inside = p[id * pcl_channels[0] + 2]; + if (valid) { + if (inside) { + U feat = p[id * pcl_channels[0] + pcl_chan[layer]]; + atomicAdd(&newmap[get_map_idx(idx, map_lay[layer])], feat); + } + } + """ + ).substitute(), + name="sum_kernel", + ) + return sum_kernel + + +def average_kernel( + width, height, +): + average_kernel = cp.ElementwiseKernel( + in_params="raw V newmap, raw W pcl_chan, raw W map_lay, raw W pcl_channels, raw U new_elmap", + out_params="raw U map", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + """ + ).substitute(width=width, height=height), + operation=string.Template( + """ + U id = floorf(i/pcl_channels[1]); + int layer = i % pcl_channels[1]; + U cnt = new_elmap[get_map_idx(id, 2)]; + if (cnt>0){ + U feat = newmap[get_map_idx(id, map_lay[layer])]/(1*cnt); + map[get_map_idx(id, map_lay[layer])] = feat; + } + """ + ).substitute(), + name="average_map_kernel", + ) + return average_kernel + + +class Average(FusionBase): + def __init__(self, params, *args, **kwargs): + # super().__init__(fusion_params, *args, **kwargs) + # print("Initialize fusion kernel") + self.name = "pointcloud_average" + self.cell_n = params.cell_n + self.resolution = params.resolution + self.sum_kernel = sum_kernel(self.resolution, self.cell_n, self.cell_n,) + self.average_kernel = average_kernel(self.cell_n, self.cell_n,) + + def __call__(self, points_all, R, t, pcl_ids, layer_ids, elevation_map, semantic_map, new_map, *args): + self.sum_kernel( + points_all, + R, + t, + pcl_ids, + layer_ids, + cp.array([points_all.shape[1], pcl_ids.shape[0]], dtype=cp.int32), + semantic_map, + new_map, + size=(points_all.shape[0] * pcl_ids.shape[0]), + ) + self.average_kernel( + new_map, + pcl_ids, + layer_ids, + cp.array([points_all.shape[1], pcl_ids.shape[0]], dtype=cp.int32), + elevation_map, + semantic_map, + size=(self.cell_n * self.cell_n * pcl_ids.shape[0]), + ) diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/fusion/pointcloud_bayesian_inference.py b/elevation_mapping_cupy/elevation_mapping_cupy/fusion/pointcloud_bayesian_inference.py new file mode 100644 index 00000000..95dc8aea --- /dev/null +++ b/elevation_mapping_cupy/elevation_mapping_cupy/fusion/pointcloud_bayesian_inference.py @@ -0,0 +1,122 @@ +# +# Copyright (c) 2023, Takahiro Miki. All rights reserved. +# Licensed under the MIT license. See LICENSE file in the project root for details. 
+# +import cupy as cp +import numpy as np +import string + +from .fusion_manager import FusionBase + + +def sum_compact_kernel( + resolution, width, height, +): + # input the list of layers, amount of channels can slo be input through kernel + sum_compact_kernel = cp.ElementwiseKernel( + in_params="raw U p, raw U R, raw U t, raw W pcl_chan, raw W map_lay, raw W pcl_channels", + out_params=" raw U newmap", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + """ + ).substitute(resolution=resolution, width=width, height=height), + operation=string.Template( + """ + U id = floorf(i/pcl_channels[1]); + int layer = i % pcl_channels[1]; + U idx = p[id * pcl_channels[0]]; + U valid = p[id * pcl_channels[0] + 1]; + U inside = p[id * pcl_channels[0] + 2]; + if (valid) { + if (inside) { + U feat = p[id * pcl_channels[0] + pcl_chan[layer]]; + atomicAdd(&newmap[get_map_idx(idx, layer)], feat); + } + } + """ + ).substitute(), + name="sum_compact_kernel", + ) + return sum_compact_kernel + + +def bayesian_inference_kernel( + width, height, +): + bayesian_inference_kernel = cp.ElementwiseKernel( + in_params=" raw W pcl_chan, raw W map_lay, raw W pcl_channels, raw U new_elmap", + out_params="raw U newmap, raw U sum_mean, raw U map", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + """ + ).substitute(width=width, height=height), + operation=string.Template( + """ + U id = floorf(i/pcl_channels[1]); + int layer = i % pcl_channels[1]; + U cnt = new_elmap[get_map_idx(id, 2)]; + if (cnt>0){ + U feat_ml = sum_mean[get_map_idx(id, layer)]/cnt; + U feat_old = map[get_map_idx(id, map_lay[layer])]; + U sigma_old = newmap[get_map_idx(id, map_lay[layer])]; + U sigma = 1.0; + U feat_new = sigma*feat_old /(cnt*sigma_old + sigma) +cnt*sigma_old *feat_ml /(cnt*sigma_old+sigma); + U sigma_new = sigma*sigma_old /(cnt*sigma_old +sigma); + map[get_map_idx(id, map_lay[layer])] = feat_new; + newmap[get_map_idx(id, map_lay[layer])] = sigma_new; + } + """ + ).substitute(), + name="bayesian_inference_kernel", + ) + return bayesian_inference_kernel + + +class BayesianInference(FusionBase): + def __init__(self, params, *args, **kwargs): + # super().__init__(fusion_params, *args, **kwargs) + # print("Initialize bayesian inference kernel") + self.name = "pointcloud_bayesian_inference" + + self.cell_n = params.cell_n + self.resolution = params.resolution + self.fusion_algorithms = params.fusion_algorithms + self.data_type = params.data_type + + self.sum_mean = cp.ones( + (self.fusion_algorithms.count("bayesian_inference"), self.cell_n, self.cell_n,), self.data_type, + ) + # TODO initialize the variance with a value different than 0 + self.sum_compact_kernel = sum_compact_kernel(self.resolution, self.cell_n, self.cell_n,) + self.bayesian_inference_kernel = bayesian_inference_kernel(self.cell_n, self.cell_n,) + + def __call__(self, points_all, R, t, pcl_ids, layer_ids, elevation_map, semantic_map, new_map, *args): + self.sum_mean *= 0 + self.sum_compact_kernel( + points_all, + R, + t, + pcl_ids, + layer_ids, + cp.array([points_all.shape[1], pcl_ids.shape[0]], dtype=cp.int32), + self.sum_mean, + size=(points_all.shape[0]), + ) + self.bayesian_inference_kernel( + pcl_ids, + layer_ids, + cp.array([points_all.shape[1], pcl_ids.shape[0]], dtype=cp.int32), + elevation_map, + new_map, + self.sum_mean, + semantic_map, 
+ size=(self.cell_n * self.cell_n), + ) diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/fusion/pointcloud_class_average.py b/elevation_mapping_cupy/elevation_mapping_cupy/fusion/pointcloud_class_average.py new file mode 100644 index 00000000..80c96dc4 --- /dev/null +++ b/elevation_mapping_cupy/elevation_mapping_cupy/fusion/pointcloud_class_average.py @@ -0,0 +1,126 @@ +# +# Copyright (c) 2023, Takahiro Miki. All rights reserved. +# Licensed under the MIT license. See LICENSE file in the project root for details. +# +import cupy as cp +import numpy as np +import string + +from .fusion_manager import FusionBase + + +def sum_kernel( + resolution, width, height, +): + """Sums the semantic values of the classes for the exponentiala verage or for the average. + + Args: + resolution: + width: + height: + + Returns: + + """ + # input the list of layers, amount of channels can slo be input through kernel + sum_kernel = cp.ElementwiseKernel( + in_params="raw U p, raw U R, raw U t, raw W pcl_chan, raw W map_lay, raw W pcl_channels", + out_params="raw U map, raw U newmap", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + """ + ).substitute(resolution=resolution, width=width, height=height), + operation=string.Template( + """ + U id = floorf(i/pcl_channels[1]); + int layer = i % pcl_channels[1]; + U idx = p[id * pcl_channels[0]]; + U valid = p[id * pcl_channels[0] + 1]; + U inside = p[id * pcl_channels[0] + 2]; + if (valid) { + if (inside) { + U feat = p[id * pcl_channels[0] + pcl_chan[layer]]; + atomicAdd(&newmap[get_map_idx(idx, map_lay[layer])], feat); + } + } + """ + ).substitute(), + name="sum_kernel", + ) + return sum_kernel + + +def class_average_kernel( + width, height, alpha, +): + class_average_kernel = cp.ElementwiseKernel( + in_params="raw V newmap, raw W pcl_chan, raw W map_lay, raw W pcl_channels, raw U new_elmap", + out_params="raw U map", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + """ + ).substitute(width=width, height=height), + operation=string.Template( + """ + U id = floorf(i/pcl_channels[1]); + int layer = i % pcl_channels[1]; + U cnt = new_elmap[get_map_idx(id, 2)]; + if (cnt>0){ + U prev_val = map[get_map_idx(id, map_lay[layer])]; + if (prev_val==0){ + U val = newmap[get_map_idx(id, map_lay[layer])]/(1*cnt); + map[get_map_idx(id, map_lay[layer])] = val; + } + else{ + U val = ${alpha} *prev_val + (1-${alpha}) * newmap[get_map_idx(id, map_lay[layer])]/(cnt); + map[get_map_idx(id, map_lay[layer])] = val; + } + } + """ + ).substitute(alpha=alpha,), + name="class_average_kernel", + ) + return class_average_kernel + + +class ClassAverage(FusionBase): + def __init__(self, params, *args, **kwargs): + # super().__init__(fusion_params, *args, **kwargs) + # print("Initialize fusion kernel") + self.name = "pointcloud_class_average" + self.cell_n = params.cell_n + self.resolution = params.resolution + self.average_weight = params.average_weight + + self.sum_kernel = sum_kernel(self.resolution, self.cell_n, self.cell_n,) + self.class_average_kernel = class_average_kernel(self.cell_n, self.cell_n, self.average_weight,) + + def __call__(self, points_all, R, t, pcl_ids, layer_ids, elevation_map, semantic_map, new_map, *args): + self.sum_kernel( + points_all, + R, + t, + pcl_ids, + layer_ids, + cp.array([points_all.shape[1], pcl_ids.shape[0]], 
dtype=cp.int32), + semantic_map, + new_map, + size=(points_all.shape[0] * pcl_ids.shape[0]), + ) + self.class_average_kernel( + new_map, + pcl_ids, + layer_ids, + cp.array([points_all.shape[1], pcl_ids.shape[0]], dtype=cp.int32), + elevation_map, + semantic_map, + size=(self.cell_n * self.cell_n * pcl_ids.shape[0]), + ) diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/fusion/pointcloud_class_bayesian.py b/elevation_mapping_cupy/elevation_mapping_cupy/fusion/pointcloud_class_bayesian.py new file mode 100644 index 00000000..5fec3a7d --- /dev/null +++ b/elevation_mapping_cupy/elevation_mapping_cupy/fusion/pointcloud_class_bayesian.py @@ -0,0 +1,75 @@ +# +# Copyright (c) 2023, Takahiro Miki. All rights reserved. +# Licensed under the MIT license. See LICENSE file in the project root for details. +# +import cupy as cp +import numpy as np +import string + +from .fusion_manager import FusionBase + + +def alpha_kernel( + resolution, width, height, +): + # input the list of layers, amount of channels can slo be input through kernel + alpha_kernel = cp.ElementwiseKernel( + in_params="raw U p, raw W pcl_chan, raw W map_lay, raw W pcl_channels", + out_params="raw U newmap", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + """ + ).substitute(resolution=resolution, width=width, height=height), + operation=string.Template( + """ + U id = floorf(i/pcl_channels[1]); + int layer = i % pcl_channels[1]; + U idx = p[id * pcl_channels[0]]; + U valid = p[id * pcl_channels[0] + 1]; + U inside = p[id * pcl_channels[0] + 2]; + if (valid) { + if (inside) { + U theta_max = 0; + W arg_max = 0; + U theta = p[id * pcl_channels[0] + pcl_chan[layer]]; + if (theta >=theta_max){ + arg_max = map_lay[layer]; + theta_max = theta; + } + atomicAdd(&newmap[get_map_idx(idx, arg_max)], theta_max); + } + } + """ + ).substitute(), + name="alpha_kernel", + ) + return alpha_kernel + + +class ClassBayesian(FusionBase): + def __init__(self, params, *args, **kwargs): + # super().__init__(fusion_params, *args, **kwargs) + # print("Initialize fusion kernel") + self.name = "pointcloud_class_bayesian" + self.cell_n = params.cell_n + self.resolution = params.resolution + self.alpha_kernel = alpha_kernel(self.resolution, self.cell_n, self.cell_n,) + + def __call__(self, points_all, R, t, pcl_ids, layer_ids, elevation_map, semantic_map, new_map, *args): + self.alpha_kernel( + points_all, + pcl_ids, + layer_ids, + cp.array([points_all.shape[1], pcl_ids.shape[0]], dtype=cp.int32), + new_map, + size=(points_all.shape[0]), + ) + # calculate new thetas + sum_alpha = cp.sum(new_map[layer_ids], axis=0) + # do not divide by zero + sum_alpha[sum_alpha == 0] = 1 + semantic_map[layer_ids] = new_map[layer_ids] / cp.expand_dims(sum_alpha, axis=0) diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/fusion/pointcloud_class_max.py b/elevation_mapping_cupy/elevation_mapping_cupy/fusion/pointcloud_class_max.py new file mode 100644 index 00000000..e506571d --- /dev/null +++ b/elevation_mapping_cupy/elevation_mapping_cupy/fusion/pointcloud_class_max.py @@ -0,0 +1,123 @@ +# +# Copyright (c) 2023, Takahiro Miki. All rights reserved. +# Licensed under the MIT license. See LICENSE file in the project root for details. 
+# +import cupy as cp +import numpy as np +import string + +from .fusion_manager import FusionBase + + +def sum_max_kernel( + resolution, width, height, +): + # input the list of layers, amount of channels can slo be input through kernel + sum_max_kernel = cp.ElementwiseKernel( + in_params="raw U p, raw U max_pt, raw T max_id, raw W pcl_chan, raw W map_lay, raw W pcl_channels", + out_params=" raw U newmap", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + """ + ).substitute(resolution=resolution, width=width, height=height), + operation=string.Template( + """ + U idx = p[i * pcl_channels[0]]; + U valid = p[i * pcl_channels[0] + 1]; + U inside = p[i * pcl_channels[0] + 2]; + if (valid) { + if (inside) { + // for every max value + for ( W it=0;it> 16; + return reds; + } + __device__ unsigned int get_g(unsigned int color){ + unsigned int green = 0xFF00; + unsigned int greens = (color & green) >> 8; + return greens; + } + __device__ unsigned int get_b(unsigned int color){ + unsigned int blue = 0xFF; + unsigned int blues = ( color & blue); + return blues; + } + """ + ).substitute(width=width, height=height), + operation=string.Template( + """ + U id = floorf(i/pcl_channels[1]); + int layer = i % pcl_channels[1]; + U idx = p[id * pcl_channels[0]]; + U valid = p[id * pcl_channels[0] + 1]; + U inside = p[id * pcl_channels[0] + 2]; + if (valid && inside){ + unsigned int color = __float_as_uint(p[id * pcl_channels[0] + pcl_chan[layer]]); + atomicAdd(&color_map[get_map_idx(idx, layer*3)], get_r(color)); + atomicAdd(&color_map[get_map_idx(idx, layer*3+1)], get_g(color)); + atomicAdd(&color_map[get_map_idx(idx, layer*3 + 2)], get_b(color)); + atomicAdd(&color_map[get_map_idx(idx, pcl_channels[1]*3)], 1); + } + """ + ).substitute(width=width), + name="add_color_kernel", + ) + return add_color_kernel + + +def color_average_kernel( + width, height, +): + color_average_kernel = cp.ElementwiseKernel( + in_params="raw V color_map, raw W pcl_chan, raw W map_lay, raw W pcl_channels", + out_params="raw U map", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + __device__ unsigned int get_r(unsigned int color){ + unsigned int red = 0xFF0000; + unsigned int reds = (color & red) >> 16; + return reds; + } + __device__ unsigned int get_g(unsigned int color){ + unsigned int green = 0xFF00; + unsigned int greens = (color & green) >> 8; + return greens; + } + __device__ unsigned int get_b(unsigned int color){ + unsigned int blue = 0xFF; + unsigned int blues = (color & blue); + return blues; + } + """ + ).substitute(width=width, height=height), + operation=string.Template( + """ + U id = floorf(i/pcl_channels[1]); + int layer = i % pcl_channels[1]; + unsigned int cnt = color_map[get_map_idx(id, pcl_channels[1]*3)]; + if (cnt>0){ + // U prev_color = map[get_map_idx(id, map_lay[layer])]; + unsigned int r = color_map[get_map_idx(id, layer*3)]/(1*cnt); + unsigned int g = color_map[get_map_idx(id, layer*3+1)]/(1*cnt); + unsigned int b = color_map[get_map_idx(id, layer*3+2)]/(1*cnt); + //if (prev_color>=0){ + // unsigned int prev_r = get_r(prev_color); + // unsigned int prev_g = get_g(prev_color); + // unsigned int prev_b = get_b(prev_color); + // unsigned int r = prev_r/2 + color_map[get_map_idx(i, layer*3)]/(2*cnt); + // unsigned int g = prev_g/2 + color_map[get_map_idx(i, layer*3+1)]/(2*cnt); 
+ // unsigned int b = prev_b/2 + color_map[get_map_idx(i, layer*3+2)]/(2*cnt); + //} + unsigned int rgb = (r<<16) + (g << 8) + b; + float rgb_ = __uint_as_float(rgb); + map[get_map_idx(id, map_lay[layer])] = rgb_; + } + """ + ).substitute(), + name="color_average_kernel", + ) + return color_average_kernel + + +class Color(FusionBase): + def __init__(self, params, *args, **kwargs): + # super().__init__(fusion_params, *args, **kwargs) + # print("Initialize fusion kernel") + self.name = "pointcloud_color" + self.cell_n = params.cell_n + self.resolution = params.resolution + + self.add_color_kernel = add_color_kernel(params.cell_n, params.cell_n,) + self.color_average_kernel = color_average_kernel(self.cell_n, self.cell_n) + + def __call__(self, points_all, R, t, pcl_ids, layer_ids, elevation_map, semantic_map, new_map, *args): + self.color_map = cp.zeros((1 + 3 * layer_ids.shape[0], self.cell_n, self.cell_n), dtype=cp.uint32,) + + points_all = points_all.astype(cp.float32) + self.add_color_kernel( + points_all, + R, + t, + pcl_ids, + layer_ids, + cp.array([points_all.shape[1], pcl_ids.shape[0]], dtype=cp.int32), + self.color_map, + size=(points_all.shape[0]), + ) + self.color_average_kernel( + self.color_map, + pcl_ids, + layer_ids, + cp.array([points_all.shape[1], pcl_ids.shape[0]], dtype=cp.int32), + semantic_map, + size=(self.cell_n * self.cell_n), + ) diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/kernels/__init__.py b/elevation_mapping_cupy/elevation_mapping_cupy/kernels/__init__.py new file mode 100644 index 00000000..06eecc38 --- /dev/null +++ b/elevation_mapping_cupy/elevation_mapping_cupy/kernels/__init__.py @@ -0,0 +1,3 @@ +from .custom_image_kernels import * +from .custom_kernels import * +from .custom_semantic_kernels import * diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/kernels/custom_image_kernels.py b/elevation_mapping_cupy/elevation_mapping_cupy/kernels/custom_image_kernels.py new file mode 100644 index 00000000..a020ba2e --- /dev/null +++ b/elevation_mapping_cupy/elevation_mapping_cupy/kernels/custom_image_kernels.py @@ -0,0 +1,271 @@ +# +# Copyright (c) 2023, Takahiro Miki. All rights reserved. +# Licensed under the MIT license. See LICENSE file in the project root for details. +# +import cupy as cp +import string + + +def image_to_map_correspondence_kernel(resolution, width, height, tolerance_z_collision): + """ + This function calculates the correspondence between the image and the map. + It takes in the resolution, width, height, and tolerance_z_collision as parameters. + The function returns a kernel that can be used to perform the correspondence calculation. 
+ """ + _image_to_map_correspondence_kernel = cp.ElementwiseKernel( + in_params="raw U map, raw U x1, raw U y1, raw U z1, raw U P, raw U K, raw U D, raw U image_height, raw U image_width, raw U center", + out_params="raw U uv_correspondence, raw B valid_correspondence", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + __device__ bool is_inside_map(int x, int y) { + return (x >= 0 && y >= 0 && x<${width} && y<${height}); + } + __device__ float get_l2_distance(int x0, int y0, int x1, int y1) { + float dx = x0-x1; + float dy = y0-y1; + return sqrt( dx*dx + dy*dy); + } + """ + ).substitute(width=width, height=height, resolution=resolution), + operation=string.Template( + """ + int cell_idx = get_map_idx(i, 0); + + // return if gridcell has no valid height + if (map[get_map_idx(i, 2)] != 1){ + return; + } + + // get current cell position + int y0 = i % ${width}; + int x0 = i / ${width}; + + // gridcell 3D point in worldframe TODO reverse x and y + float p1 = (x0-(${width}/2)) * ${resolution} + center[0]; + float p2 = (y0-(${height}/2)) * ${resolution} + center[1]; + float p3 = map[cell_idx] + center[2]; + + // reproject 3D point into image plane + float u = p1 * P[0] + p2 * P[1] + p3 * P[2] + P[3]; + float v = p1 * P[4] + p2 * P[5] + p3 * P[6] + P[7]; + float d = p1 * P[8] + p2 * P[9] + p3 * P[10] + P[11]; + + // filter point behind image plane + if (d <= 0) { + return; + } + u = u/d; + v = v/d; + + // Check if D is all zeros + bool is_D_zero = (D[0] == 0 && D[1] == 0 && D[2] == 0 && D[3] == 0 && D[4] == 0); + + // Apply undistortion using distortion matrix D if not all zeros + if (!is_D_zero) { + float k1 = D[0]; + float k2 = D[1]; + float p1 = D[2]; + float p2 = D[3]; + float k3 = D[4]; + float fx = K[0]; + float fy = K[4]; + float cx = K[2]; + float cy = K[5]; + float x = (u - cx) / fx; + float y = (v - cy) / fy; + float r2 = x * x + y * y; + float radial_distortion = 1 + k1 * r2 + k2 * r2 * r2 + k3 * r2 * r2 * r2; + float u_corrected = x * radial_distortion + 2 * p1 * x * y + p2 * (r2 + 2 * x * x); + float v_corrected = y * radial_distortion + 2 * p2 * x * y + p1 * (r2 + 2 * y * y); + u = fx * u_corrected + cx; + v = fy * v_corrected + cy; + } + + // filter point next to image plane + if ((u < 0) || (v < 0) || (u >= image_width) || (v >= image_height)){ + return; + } + + int y0_c = y0; + int x0_c = x0; + float total_dis = get_l2_distance(x0_c, y0_c, x1,y1); + float z0 = map[cell_idx]; + float delta_z = z1-z0; + + + // bresenham algorithm to iterate over cells in line between camera center and current gridmap cell + // https://en.wikipedia.org/wiki/Bresenham%27s_line_algorithm + int dx = abs(x1-x0); + int sx = x0 < x1 ? 1 : -1; + int dy = -abs(y1 - y0); + int sy = y0 < y1 ? 
1 : -1; + int error = dx + dy; + + bool is_valid = true; + + // iterate over all cells along line + while (1){ + // assumption we do not need to check the height for camera center cell + if (x0 == x1 && y0 == y1){ + break; + } + + // check if height is invalid + if (is_inside_map(x0,y0)){ + int idx = y0 + (x0 * ${width}); + if (map[get_map_idx(idx, 2)]){ + float dis = get_l2_distance(x0_c, y0_c, x0, y0); + float rayheight = z0 + ( dis / total_dis * delta_z); + if ( map[idx] - ${tolerance_z_collision} > rayheight){ + is_valid = false; + break; + } + } + } + + + // computation of next gridcell index in line + int e2 = 2 * error; + if (e2 >= dy){ + if(x0 == x1){ + break; + } + error = error + dy; + x0 = x0 + sx; + } + if (e2 <= dx){ + if (y0 == y1){ + break; + } + error = error + dx; + y0 = y0 + sy; + } + } + + // mark the correspondence + uv_correspondence[get_map_idx(i, 0)] = u; + uv_correspondence[get_map_idx(i, 1)] = v; + valid_correspondence[get_map_idx(i, 0)] = is_valid; + """ + ).substitute(height=height, width=width, resolution=resolution, tolerance_z_collision=tolerance_z_collision), + name="image_to_map_correspondence_kernel", + ) + return _image_to_map_correspondence_kernel + + +def average_correspondences_to_map_kernel(width, height): + """ + This function calculates the average correspondences to the map. + It takes in the width and height as parameters. + The function returns a kernel that can be used to perform the correspondence calculation. + """ + _average_correspondences_to_map_kernel = cp.ElementwiseKernel( + in_params="raw U sem_map, raw U map_idx, raw U image_mono, raw U uv_correspondence, raw B valid_correspondence, raw U image_height, raw U image_width", + out_params="raw U new_sem_map", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + """ + ).substitute(width=width, height=height), + operation=string.Template( + """ + int cell_idx = get_map_idx(i, 0); + if (valid_correspondence[cell_idx]){ + int cell_idx_2 = get_map_idx(i, 1); + int idx = int(uv_correspondence[cell_idx]) + int(uv_correspondence[cell_idx_2]) * image_width; + new_sem_map[get_map_idx(i, map_idx)] = image_mono[idx]; + }else{ + new_sem_map[get_map_idx(i, map_idx)] = sem_map[get_map_idx(i, map_idx)]; + } + + """ + ).substitute(), + name="average_correspondences_to_map_kernel", + ) + return _average_correspondences_to_map_kernel + + +def exponential_correspondences_to_map_kernel(width, height, alpha): + """ + This function calculates the exponential correspondences to the map. + It takes in the width, height, and alpha as parameters. + The function returns a kernel that can be used to perform the correspondence calculation. 
+ """ + _exponential_correspondences_to_map_kernel = cp.ElementwiseKernel( + in_params="raw U sem_map, raw U map_idx, raw U image_mono, raw U uv_correspondence, raw B valid_correspondence, raw U image_height, raw U image_width", + out_params="raw U new_sem_map", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + """ + ).substitute(width=width, height=height), + operation=string.Template( + """ + int cell_idx = get_map_idx(i, 0); + if (valid_correspondence[cell_idx]){ + int cell_idx_2 = get_map_idx(i, 1); + int idx = int(uv_correspondence[cell_idx]) + int(uv_correspondence[cell_idx_2]) * image_width; + new_sem_map[get_map_idx(i, map_idx)] = sem_map[get_map_idx(i, map_idx)] * (1-${alpha}) + ${alpha} * image_mono[idx]; + }else{ + new_sem_map[get_map_idx(i, map_idx)] = sem_map[get_map_idx(i, map_idx)]; + } + + """ + ).substitute(alpha=alpha), + name="exponential_correspondences_to_map_kernel", + ) + return _exponential_correspondences_to_map_kernel + + +def color_correspondences_to_map_kernel(width, height): + """ + This function calculates the color correspondences to the map. + It takes in the width and height as parameters. + The function returns a kernel that can be used to perform the correspondence calculation. + """ + _color_correspondences_to_map_kernel = cp.ElementwiseKernel( + in_params="raw U sem_map, raw U map_idx, raw U image_rgb, raw U uv_correspondence, raw B valid_correspondence, raw U image_height, raw U image_width", + out_params="raw U new_sem_map", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + """ + ).substitute(width=width, height=height), + operation=string.Template( + """ + int cell_idx = get_map_idx(i, 0); + if (valid_correspondence[cell_idx]){ + int cell_idx_2 = get_map_idx(i, 1); + + int idx_red = int(uv_correspondence[cell_idx]) + int(uv_correspondence[cell_idx_2]) * image_width; + int idx_green = image_width * image_height + idx_red; + int idx_blue = image_width * image_height * 2 + idx_red; + + unsigned int r = image_rgb[idx_red]; + unsigned int g = image_rgb[idx_green]; + unsigned int b = image_rgb[idx_blue]; + + unsigned int rgb = (r<<16) + (g << 8) + b; + float rgb_ = __uint_as_float(rgb); + new_sem_map[get_map_idx(i, map_idx)] = rgb_; + }else{ + new_sem_map[get_map_idx(i, map_idx)] = sem_map[get_map_idx(i, map_idx)]; + } + """ + ).substitute(), + name="color_correspondences_to_map_kernel", + ) + return _color_correspondences_to_map_kernel diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/kernels/custom_kernels.py b/elevation_mapping_cupy/elevation_mapping_cupy/kernels/custom_kernels.py new file mode 100644 index 00000000..238a0a57 --- /dev/null +++ b/elevation_mapping_cupy/elevation_mapping_cupy/kernels/custom_kernels.py @@ -0,0 +1,705 @@ +# +# Copyright (c) 2022, Takahiro Miki. All rights reserved. +# Licensed under the MIT license. See LICENSE file in the project root for details. 
+# +import cupy as cp +import string + + +def map_utils( + resolution, + width, + height, + sensor_noise_factor, + min_valid_distance, + max_height_range, + ramped_height_range_a, + ramped_height_range_b, + ramped_height_range_c, +): + util_preamble = string.Template( + """ + __device__ float16 clamp(float16 x, float16 min_x, float16 max_x) { + + return max(min(x, max_x), min_x); + } + __device__ int get_x_idx(float16 x, float16 center) { + int i = (x - center) / ${resolution} + 0.5 * ${width}; + return i; + } + __device__ int get_y_idx(float16 y, float16 center) { + int i = (y - center) / ${resolution} + 0.5 * ${height}; + return i; + } + __device__ bool is_inside(int idx) { + // Fixed: Row-Major (Row=Y, Col=X) + // Row index (Y) + int idx_y = idx / ${width}; + // Column index (X) + int idx_x = idx % ${width}; + // Check Col bounds (Width) + if (idx_x == 0 || idx_x == ${width} - 1) { + return false; + } + // Check Row bounds (Height) + if (idx_y == 0 || idx_y == ${height} - 1) { + return false; + } + return true; + } + __device__ int get_idx(float16 x, float16 y, float16 center_x, float16 center_y) { + int idx_x = clamp(get_x_idx(x, center_x), 0, ${width} - 1); + int idx_y = clamp(get_y_idx(y, center_y), 0, ${height} - 1); + // Fixed: Row-Major (Row=Y, Col=X) + return ${width} * idx_y + idx_x; + } + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + __device__ float transform_p(float16 x, float16 y, float16 z, + float16 r0, float16 r1, float16 r2, float16 t) { + return r0 * x + r1 * y + r2 * z + t; + } + __device__ float z_noise(float16 z){ + return ${sensor_noise_factor} * z * z; + } + + __device__ float point_sensor_distance(float16 x, float16 y, float16 z, + float16 sx, float16 sy, float16 sz) { + float d = (x - sx) * (x - sx) + (y - sy) * (y - sy) + (z - sz) * (z - sz); + return d; + } + + __device__ bool is_valid(float16 x, float16 y, float16 z, + float16 sx, float16 sy, float16 sz) { + float d = point_sensor_distance(x, y, z, sx, sy, sz); + float dxy = max(sqrt(x * x + y * y) - ${ramped_height_range_b}, 0.0); + if (d < ${min_valid_distance} * ${min_valid_distance}) { + return false; + } + else if (z - sz > dxy * ${ramped_height_range_a} + ${ramped_height_range_c} || z - sz > ${max_height_range}) { + return false; + } + else { + return true; + } + } + + __device__ float ray_vector(float16 tx, float16 ty, float16 tz, + float16 px, float16 py, float16 pz, + float16& rx, float16& ry, float16& rz){ + float16 vx = px - tx; + float16 vy = py - ty; + float16 vz = pz - tz; + float16 norm = sqrt(vx * vx + vy * vy + vz * vz); + if (norm > 0) { + rx = vx / norm; + ry = vy / norm; + rz = vz / norm; + } + else { + rx = 0; + ry = 0; + rz = 0; + } + return norm; + } + + __device__ float inner_product(float16 x1, float16 y1, float16 z1, + float16 x2, float16 y2, float16 z2) { + + float product = (x1 * x2 + y1 * y2 + z1 * z2); + return product; + } + + """ + ).substitute( + resolution=resolution, + width=width, + height=height, + sensor_noise_factor=sensor_noise_factor, + min_valid_distance=min_valid_distance, + max_height_range=max_height_range, + ramped_height_range_a=ramped_height_range_a, + ramped_height_range_b=ramped_height_range_b, + ramped_height_range_c=ramped_height_range_c, + ) + return util_preamble + + +def add_points_kernel( + resolution, + width, + height, + sensor_noise_factor, + mahalanobis_thresh, + outlier_variance, + wall_num_thresh, + max_ray_length, + cleanup_step, + min_valid_distance, + 
max_height_range, + cleanup_cos_thresh, + ramped_height_range_a, + ramped_height_range_b, + ramped_height_range_c, + enable_edge_shaped=True, + enable_visibility_cleanup=True, +): + add_points_kernel = cp.ElementwiseKernel( + in_params="raw U center_x, raw U center_y, raw U R, raw U t, raw U norm_map", + out_params="raw U p, raw U map, raw T newmap", + preamble=map_utils( + resolution, + width, + height, + sensor_noise_factor, + min_valid_distance, + max_height_range, + ramped_height_range_a, + ramped_height_range_b, + ramped_height_range_c, + ), + operation=string.Template( + """ + U rx = p[i * 3]; + U ry = p[i * 3 + 1]; + U rz = p[i * 3 + 2]; + U x = transform_p(rx, ry, rz, R[0], R[1], R[2], t[0]); + U y = transform_p(rx, ry, rz, R[3], R[4], R[5], t[1]); + U z = transform_p(rx, ry, rz, R[6], R[7], R[8], t[2]); + U v = z_noise(rz); + int idx = get_idx(x, y, center_x[0], center_y[0]); + if (is_valid(x, y, z, t[0], t[1], t[2])) { + if (is_inside(idx)) { + U map_h = map[get_map_idx(idx, 0)]; + U map_v = map[get_map_idx(idx, 1)]; + U num_points = newmap[get_map_idx(idx, 4)]; + if (abs(map_h - z) > (map_v * ${mahalanobis_thresh})) { + atomicAdd(&map[get_map_idx(idx, 1)], ${outlier_variance}); + } + else { + if (${enable_edge_shaped} && (num_points > ${wall_num_thresh}) && (z < map_h - map_v * ${mahalanobis_thresh} / num_points)) { + // continue; + } + else { + T new_h = (map_h * v + z * map_v) / (map_v + v); + T new_v = (map_v * v) / (map_v + v); + atomicAdd(&newmap[get_map_idx(idx, 0)], new_h); + atomicAdd(&newmap[get_map_idx(idx, 1)], new_v); + atomicAdd(&newmap[get_map_idx(idx, 2)], 1.0); + // is Valid + map[get_map_idx(idx, 2)] = 1; + // Time layer + map[get_map_idx(idx, 4)] = 0.0; + // Upper bound + map[get_map_idx(idx, 5)] = new_h; + map[get_map_idx(idx, 6)] = 0.0; + } + // visibility cleanup + } + } + } + if (${enable_visibility_cleanup}) { + float16 ray_x, ray_y, ray_z; + float16 ray_length = ray_vector(t[0], t[1], t[2], x, y, z, ray_x, ray_y, ray_z); + ray_length = min(ray_length, (float16)${max_ray_length}); + int last_nidx = -1; + for (float16 s=${ray_step}; s < ray_length; s+=${ray_step}) { + // iterate through ray + U nx = t[0] + ray_x * s; + U ny = t[1] + ray_y * s; + U nz = t[2] + ray_z * s; + int nidx = get_idx(nx, ny, center_x[0], center_y[0]); + if (last_nidx == nidx) {continue;} // Skip if we're still in the same cell + else {last_nidx = nidx;} + if (!is_inside(nidx)) {continue;} + + U nmap_h = map[get_map_idx(nidx, 0)]; + U nmap_v = map[get_map_idx(nidx, 1)]; + U nmap_valid = map[get_map_idx(nidx, 2)]; + // traversability + U nmap_trav = map[get_map_idx(nidx, 3)]; + // Time layer + U non_updated_t = map[get_map_idx(nidx, 4)]; + // upper bound + U nmap_upper = map[get_map_idx(nidx, 5)]; + U nmap_is_upper = map[get_map_idx(nidx, 6)]; + + // If point is close or is farther away than ray length, skip. 
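+                // NOTE: d below is a squared distance in m^2, so the 0.1 threshold
+                // skips ray samples within ~0.32 m of the measured point.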
+ float16 d = (x - nx) * (x - nx) + (y - ny) * (y - ny) + (z - nz) * (z - nz); + if (d < 0.1 || !is_valid(x, y, z, t[0], t[1], t[2])) {continue;} + + // If invalid, do upper bound check, then skip + if (nmap_valid < 0.5) { + if (nz < nmap_upper || nmap_is_upper < 0.5) { + map[get_map_idx(nidx, 5)] = nz; + map[get_map_idx(nidx, 6)] = 1.0f; + } + continue; + } + // If updated recently, skip + if (non_updated_t < 0.5) {continue;} + + if (nmap_h > nz + 0.01 - min(nmap_v, 1.0) * 0.05) { + // If ray and norm is vertical, skip + U norm_x = norm_map[get_map_idx(nidx, 0)]; + U norm_y = norm_map[get_map_idx(nidx, 1)]; + U norm_z = norm_map[get_map_idx(nidx, 2)]; + float product = inner_product(ray_x, ray_y, ray_z, norm_x, norm_y, norm_z); + if (fabs(product) < ${cleanup_cos_thresh}) {continue;} + U num_points = newmap[get_map_idx(nidx, 3)]; + if (num_points > ${wall_num_thresh} && non_updated_t < 1.0) {continue;} + + // Finally, this cell is penetrated by the ray. + atomicAdd(&map[get_map_idx(nidx, 2)], -${cleanup_step}/(ray_length / ${max_ray_length})); + atomicAdd(&map[get_map_idx(nidx, 1)], ${outlier_variance}); + // Do upper bound check. + if (nz < nmap_upper || nmap_is_upper < 0.5) { + map[get_map_idx(nidx, 5)] = nz; + map[get_map_idx(nidx, 6)] = 1.0f; + } + } + } + } + p[i * 3]= idx; + p[i * 3 + 1] = is_valid(x, y, z, t[0], t[1], t[2]); + p[i * 3 + 2] = is_inside(idx); + """ + ).substitute( + mahalanobis_thresh=mahalanobis_thresh, + outlier_variance=outlier_variance, + wall_num_thresh=wall_num_thresh, + ray_step=resolution / 2 ** 0.5, + max_ray_length=max_ray_length, + cleanup_step=cleanup_step, + cleanup_cos_thresh=cleanup_cos_thresh, + enable_edge_shaped=int(enable_edge_shaped), + enable_visibility_cleanup=int(enable_visibility_cleanup), + ), + name="add_points_kernel", + ) + return add_points_kernel + + +def error_counting_kernel( + resolution, + width, + height, + sensor_noise_factor, + mahalanobis_thresh, + outlier_variance, + traversability_inlier, + min_valid_distance, + max_height_range, + ramped_height_range_a, + ramped_height_range_b, + ramped_height_range_c, +): + error_counting_kernel = cp.ElementwiseKernel( + in_params="raw U map, raw U p, raw U center_x, raw U center_y, raw U R, raw U t", + out_params="raw U newmap, raw T error, raw T error_cnt", + preamble=map_utils( + resolution, + width, + height, + sensor_noise_factor, + min_valid_distance, + max_height_range, + ramped_height_range_a, + ramped_height_range_b, + ramped_height_range_c, + ), + operation=string.Template( + """ + U rx = p[i * 3]; + U ry = p[i * 3 + 1]; + U rz = p[i * 3 + 2]; + U x = transform_p(rx, ry, rz, R[0], R[1], R[2], t[0]); + U y = transform_p(rx, ry, rz, R[3], R[4], R[5], t[1]); + U z = transform_p(rx, ry, rz, R[6], R[7], R[8], t[2]); + U v = z_noise(rz); + // if (!is_valid(z, t[2])) {return;} + if (!is_valid(x, y, z, t[0], t[1], t[2])) {return;} + // if ((x - t[0]) * (x - t[0]) + (y - t[1]) * (y - t[1]) + (z - t[2]) * (z - t[2]) < 0.5) {return;} + int idx = get_idx(x, y, center_x[0], center_y[0]); + if (!is_inside(idx)) { + return; + } + U map_h = map[get_map_idx(idx, 0)]; + U map_v = map[get_map_idx(idx, 1)]; + U map_valid = map[get_map_idx(idx, 2)]; + U map_t = map[get_map_idx(idx, 3)]; + if (map_valid > 0.5 && (abs(map_h - z) < (map_v * ${mahalanobis_thresh})) + && map_v < ${outlier_variance} / 2.0 + && map_t > ${traversability_inlier}) { + T e = z - map_h; + atomicAdd(&error[0], e); + atomicAdd(&error_cnt[0], 1); + atomicAdd(&newmap[get_map_idx(idx, 3)], 1.0); + } + atomicAdd(&newmap[get_map_idx(idx, 
4)], 1.0); + """ + ).substitute( + mahalanobis_thresh=mahalanobis_thresh, + outlier_variance=outlier_variance, + traversability_inlier=traversability_inlier, + ), + name="error_counting_kernel", + ) + return error_counting_kernel + + +def average_map_kernel(width, height, max_variance, initial_variance): + average_map_kernel = cp.ElementwiseKernel( + in_params="raw U newmap", + out_params="raw U map", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + """ + ).substitute(width=width, height=height), + operation=string.Template( + """ + U h = map[get_map_idx(i, 0)]; + U v = map[get_map_idx(i, 1)]; + U valid = map[get_map_idx(i, 2)]; + U new_h = newmap[get_map_idx(i, 0)]; + U new_v = newmap[get_map_idx(i, 1)]; + U new_cnt = newmap[get_map_idx(i, 2)]; + if (new_cnt > 0) { + if (new_v / new_cnt > ${max_variance}) { + map[get_map_idx(i, 0)] = 0; + map[get_map_idx(i, 1)] = ${initial_variance}; + map[get_map_idx(i, 2)] = 0; + } + else { + map[get_map_idx(i, 0)] = new_h / new_cnt; + map[get_map_idx(i, 1)] = new_v / new_cnt; + map[get_map_idx(i, 2)] = 1; + } + } + if (valid < 0.5) { + map[get_map_idx(i, 0)] = 0; + map[get_map_idx(i, 1)] = ${initial_variance}; + map[get_map_idx(i, 2)] = 0; + } + """ + ).substitute(max_variance=max_variance, initial_variance=initial_variance), + name="average_map_kernel", + ) + return average_map_kernel + + +def dilation_filter_kernel(width, height, dilation_size): + dilation_filter_kernel = cp.ElementwiseKernel( + in_params="raw U map, raw U mask", + out_params="raw U newmap, raw U newmask", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + + __device__ int get_relative_map_idx(int idx, int dx, int dy, int layer_n) { + const int layer = ${width} * ${height}; + const int relative_idx = idx + ${width} * dy + dx; + return layer * layer_n + relative_idx; + } + __device__ bool is_inside(int idx) { + // Fixed: Row-Major (Row=Y, Col=X) + // Row index (Y) + int idx_y = idx / ${width}; + // Column index (X) + int idx_x = idx % ${width}; + // Check Col bounds (Width) + if (idx_x <= 0 || idx_x >= ${width} - 1) { + return false; + } + // Check Row bounds (Height) + if (idx_y <= 0 || idx_y >= ${height} - 1) { + return false; + } + return true; + } + """ + ).substitute(width=width, height=height), + operation=string.Template( + """ + U h = map[get_map_idx(i, 0)]; + U valid = mask[get_map_idx(i, 0)]; + newmap[get_map_idx(i, 0)] = h; + if (valid < 0.5) { + U distance = 100; + U near_value = 0; + for (int dy = -${dilation_size}; dy <= ${dilation_size}; dy++) { + for (int dx = -${dilation_size}; dx <= ${dilation_size}; dx++) { + int idx = get_relative_map_idx(i, dx, dy, 0); + if (!is_inside(idx)) {continue;} + U valid = mask[idx]; + if(valid > 0.5 && dx + dy < distance) { + distance = dx + dy; + near_value = map[idx]; + } + } + } + if(distance < 100) { + newmap[get_map_idx(i, 0)] = near_value; + newmask[get_map_idx(i, 0)] = 1.0; + } + } + """ + ).substitute(dilation_size=dilation_size), + name="dilation_filter_kernel", + ) + return dilation_filter_kernel + + +def normal_filter_kernel(width, height, resolution): + normal_filter_kernel = cp.ElementwiseKernel( + in_params="raw U map, raw U mask", + out_params="raw U newmap", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + 
return layer * layer_n + idx; + } + + __device__ int get_relative_map_idx(int idx, int dx, int dy, int layer_n) { + const int layer = ${width} * ${height}; + const int relative_idx = idx + ${width} * dy + dx; + return layer * layer_n + relative_idx; + } + __device__ bool is_inside(int idx) { + // Fixed: Row-Major (Row=Y, Col=X) + // Row index (Y) + int idx_y = idx / ${width}; + // Column index (X) + int idx_x = idx % ${width}; + // Check Col bounds (Width) + if (idx_x <= 0 || idx_x >= ${width} - 1) { + return false; + } + // Check Row bounds (Height) + if (idx_y <= 0 || idx_y >= ${height} - 1) { + return false; + } + return true; + } + __device__ float resolution() { + return ${resolution}; + } + """ + ).substitute(width=width, height=height, resolution=resolution), + operation=string.Template( + """ + U h = map[get_map_idx(i, 0)]; + U valid = mask[get_map_idx(i, 0)]; + if (valid > 0.5) { + int idx_x = get_relative_map_idx(i, 1, 0, 0); + int idx_y = get_relative_map_idx(i, 0, 1, 0); + if (!is_inside(idx_x) || !is_inside(idx_y)) { return; } + float dzdx = (map[idx_x] - h); + float dzdy = (map[idx_y] - h); + // Fixed: Normal = (-dH/dx, -dH/dy, 1) + float nx = -dzdx / resolution(); + float ny = -dzdy / resolution(); + float nz = 1; + float norm = sqrt((nx * nx) + (ny * ny) + 1); + newmap[get_map_idx(i, 0)] = nx / norm; + newmap[get_map_idx(i, 1)] = ny / norm; + newmap[get_map_idx(i, 2)] = nz / norm; + } + """ + ).substitute(), + name="normal_filter_kernel", + ) + return normal_filter_kernel + + +def polygon_mask_kernel(width, height, resolution): + polygon_mask_kernel = cp.ElementwiseKernel( + in_params="raw U polygon, raw U center_x, raw U center_y, raw int16 polygon_n, raw U polygon_bbox", + out_params="raw U mask", + preamble=string.Template( + """ + __device__ struct Point + { + int x; + int y; + }; + // Given three colinear points p, q, r, the function checks if + // point q lies on line segment 'pr' + __device__ bool onSegment(Point p, Point q, Point r) + { + if (q.x <= max(p.x, r.x) && q.x >= min(p.x, r.x) && + q.y <= max(p.y, r.y) && q.y >= min(p.y, r.y)) + return true; + return false; + } + // To find orientation of ordered triplet (p, q, r). + // The function returns following values + // 0 --> p, q and r are colinear + // 1 --> Clockwise + // 2 --> Counterclockwise + __device__ int orientation(Point p, Point q, Point r) + { + int val = (q.y - p.y) * (r.x - q.x) - + (q.x - p.x) * (r.y - q.y); + if (val == 0) return 0; // colinear + return (val > 0)? 1: 2; // clock or counterclock wise + } + // The function that returns true if line segment 'p1q1' + // and 'p2q2' intersect. 
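+            // (Classic orientation/on-segment intersection test; together with the
+            // even-odd counting in the operation below, it ray-casts point-in-polygon
+            // toward the 'extreme' point.)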
+ __device__ bool doIntersect(Point p1, Point q1, Point p2, Point q2) + { + // Find the four orientations needed for general and + // special cases + int o1 = orientation(p1, q1, p2); + int o2 = orientation(p1, q1, q2); + int o3 = orientation(p2, q2, p1); + int o4 = orientation(p2, q2, q1); + // General case + if (o1 != o2 && o3 != o4) + return true; + // Special Cases + // p1, q1 and p2 are colinear and p2 lies on segment p1q1 + if (o1 == 0 && onSegment(p1, p2, q1)) return true; + // p1, q1 and p2 are colinear and q2 lies on segment p1q1 + if (o2 == 0 && onSegment(p1, q2, q1)) return true; + // p2, q2 and p1 are colinear and p1 lies on segment p2q2 + if (o3 == 0 && onSegment(p2, p1, q2)) return true; + // p2, q2 and q1 are colinear and q1 lies on segment p2q2 + if (o4 == 0 && onSegment(p2, q1, q2)) return true; + return false; // Doesn't fall in any of the above cases + } + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + + __device__ int get_idx_x(int idx){ + // Fixed: Column index (X) + int idx_x = idx % ${width}; + return idx_x; + } + + __device__ int get_idx_y(int idx){ + // Fixed: Row index (Y) + int idx_y = idx / ${width}; + return idx_y; + } + + __device__ float16 clamp(float16 x, float16 min_x, float16 max_x) { + + return max(min(x, max_x), min_x); + } + __device__ float16 round(float16 x) { + return (int)x + (int)(2 * (x - (int)x)); + } + __device__ int get_x_idx(float16 x, float16 center) { + const float resolution = ${resolution}; + const float width = ${width}; + int i = (x - center) / resolution + 0.5 * width; + return i; + } + __device__ int get_y_idx(float16 y, float16 center) { + const float resolution = ${resolution}; + const float height = ${height}; + int i = (y - center) / resolution + 0.5 * height; + return i; + } + __device__ int get_idx(float16 x, float16 y, float16 center_x, float16 center_y) { + int idx_x = clamp(get_x_idx(x, center_x), 0, ${width} - 1); + int idx_y = clamp(get_y_idx(y, center_y), 0, ${height} - 1); + // Fixed: Row-Major (Row=Y, Col=X) + return ${width} * idx_y + idx_x; + } + + """ + ).substitute(width=width, height=height, resolution=resolution), + operation=string.Template( + """ + // Point p = {get_idx_x(i, center_x[0]), get_idx_y(i, center_y[0])}; + Point p = {get_idx_x(i), get_idx_y(i)}; + Point extreme = {100000, p.y}; + int bbox_min_idx = get_idx(polygon_bbox[0], polygon_bbox[1], center_x[0], center_y[0]); + int bbox_max_idx = get_idx(polygon_bbox[2], polygon_bbox[3], center_x[0], center_y[0]); + Point bmin = {get_idx_x(bbox_min_idx), get_idx_y(bbox_min_idx)}; + Point bmax = {get_idx_x(bbox_max_idx), get_idx_y(bbox_max_idx)}; + if (p.x < bmin.x || p.x > bmax.x || p.y < bmin.y || p.y > bmax.y){ + mask[i] = 0; + return; + } + else { + int intersect_cnt = 0; + for (int j = 0; j < polygon_n[0]; j++) { + Point p1, p2; + int i1 = get_idx(polygon[j * 2 + 0], polygon[j * 2 + 1], center_x[0], center_y[0]); + p1.x = get_idx_x(i1); + p1.y = get_idx_y(i1); + int j2 = (j + 1) % polygon_n[0]; + int i2 = get_idx(polygon[j2 * 2 + 0], polygon[j2 * 2 + 1], center_x[0], center_y[0]); + p2.x = get_idx_x(i2); + p2.y = get_idx_y(i2); + if (doIntersect(p1, p2, p, extreme)) + { + // If the point 'p' is colinear with line segment 'i-next', + // then check if it lies on segment. 
If it lies, return true,
+                    // otherwise false
+                    if (orientation(p1, p, p2) == 0) {
+                        if (onSegment(p1, p, p2)){
+                            mask[i] = 1;
+                            return;
+                        }
+                    }
+                    else if(((p1.y <= p.y) && (p2.y > p.y)) || ((p1.y > p.y) && (p2.y <= p.y))){
+                        intersect_cnt++;
+                    }
+                }
+            }
+            if (intersect_cnt % 2 == 0) { mask[i] = 0; }
+            else { mask[i] = 1; }
+        }
+        """
+        ).substitute(a=1),
+        name="polygon_mask_kernel",
+    )
+    return polygon_mask_kernel
+
+
+if __name__ == "__main__":
+    for i in range(10):
+        import random
+
+        a = cp.zeros((100, 100))
+        n = random.randint(3, 5)
+
+        # polygon = cp.array([[-1, -1], [3, 4], [2, 4], [1, 3]], dtype=float)
+        polygon = cp.array(
+            [[(random.random() - 0.5) * 10, (random.random() - 0.5) * 10] for i in range(n)], dtype=float
+        )
+        print(polygon)
+        polygon_min = polygon.min(axis=0)
+        polygon_max = polygon.max(axis=0)
+        polygon_bbox = cp.concatenate([polygon_min, polygon_max]).flatten()
+        polygon_n = polygon.shape[0]
+        print(polygon_bbox)
+        # polygon_bbox = cp.array([-5, -5, 5, 5], dtype=float)
+        polygon_mask = polygon_mask_kernel(100, 100, 0.1)
+        import time
+
+        start = time.time()
+        polygon_mask(polygon, 0.0, 0.0, polygon_n, polygon_bbox, a, size=(100 * 100))
+        print(time.time() - start)
+        import pylab as plt
+
+        print(a)
+        plt.imshow(cp.asnumpy(a))
+        plt.show()
diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/kernels/custom_semantic_kernels.py b/elevation_mapping_cupy/elevation_mapping_cupy/kernels/custom_semantic_kernels.py
new file mode 100644
index 00000000..26db74c0
--- /dev/null
+++ b/elevation_mapping_cupy/elevation_mapping_cupy/kernels/custom_semantic_kernels.py
@@ -0,0 +1,375 @@
+#
+# Copyright (c) 2023, Takahiro Miki. All rights reserved.
+# Licensed under the MIT license. See LICENSE file in the project root for details.
+#
+import cupy as cp
+import string
+
+
+def sum_kernel(
+    resolution, width, height,
+):
+    """Sums the semantic values of the classes for the exponential average or for the average.
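+    The flat point buffer ``p`` stores ``pcl_channels[0]`` values per point,
+    laid out as ``[cell_idx, is_valid, is_inside, feature_0, ...]``, which is
+    why the kernels below index it as ``p[id * pcl_channels[0] + n]``. A
+    minimal, illustrative launch (array names hypothetical)::
+
+        kernel = sum_kernel(0.04, 200, 200)
+        kernel(points, R, t, pcl_chan, map_lay, pcl_channels,
+               sem_map, new_sem_map, size=(n_points * n_fusion_layers))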
+
+    Args:
+        resolution: map cell size [m].
+        width: map width in cells.
+        height: map height in cells.
+
+    Returns:
+
+    """
+    # input the list of layers, amount of channels can also be input through kernel
+    sum_kernel = cp.ElementwiseKernel(
+        in_params="raw U p, raw U R, raw U t, raw W pcl_chan, raw W map_lay, raw W pcl_channels",
+        out_params="raw U map, raw U newmap",
+        preamble=string.Template(
+            """
+            __device__ int get_map_idx(int idx, int layer_n) {
+                const int layer = ${width} * ${height};
+                return layer * layer_n + idx;
+            }
+            """
+        ).substitute(resolution=resolution, width=width, height=height),
+        operation=string.Template(
+            """
+            U id = floorf(i/pcl_channels[1]);
+            int layer = i % pcl_channels[1];
+            U idx = p[id * pcl_channels[0]];
+            U valid = p[id * pcl_channels[0] + 1];
+            U inside = p[id * pcl_channels[0] + 2];
+            if (valid) {
+                if (inside) {
+                    U feat = p[id * pcl_channels[0] + pcl_chan[layer]];
+                    atomicAdd(&newmap[get_map_idx(idx, map_lay[layer])], feat);
+                }
+            }
+            """
+        ).substitute(),
+        name="sum_kernel",
+    )
+    return sum_kernel
+
+
+def sum_compact_kernel(
+    resolution, width, height,
+):
+    # input the list of layers, amount of channels can also be input through kernel
+    sum_compact_kernel = cp.ElementwiseKernel(
+        in_params="raw U p, raw U R, raw U t, raw W pcl_chan, raw W map_lay, raw W pcl_channels",
+        out_params=" raw U newmap",
+        preamble=string.Template(
+            """
+            __device__ int get_map_idx(int idx, int layer_n) {
+                const int layer = ${width} * ${height};
+                return layer * layer_n + idx;
+            }
+            """
+        ).substitute(resolution=resolution, width=width, height=height),
+        operation=string.Template(
+            """
+            U id = floorf(i/pcl_channels[1]);
+            int layer = i % pcl_channels[1];
+            U idx = p[id * pcl_channels[0]];
+            U valid = p[id * pcl_channels[0] + 1];
+            U inside = p[id * pcl_channels[0] + 2];
+            if (valid) {
+                if (inside) {
+                    U feat = p[id * pcl_channels[0] + pcl_chan[layer]];
+                    atomicAdd(&newmap[get_map_idx(idx, layer)], feat);
+                }
+            }
+            """
+        ).substitute(),
+        name="sum_compact_kernel",
+    )
+    return sum_compact_kernel
+
+
+def sum_max_kernel(
+    resolution, width, height,
+):
+    # input the list of layers, amount of channels can also be input through kernel
+    sum_max_kernel = cp.ElementwiseKernel(
+        in_params="raw U p, raw U max_pt, raw T max_id, raw W pcl_chan, raw W map_lay, raw W pcl_channels",
+        out_params=" raw U newmap",
+        preamble=string.Template(
+            """
+            __device__ int get_map_idx(int idx, int layer_n) {
+                const int layer = ${width} * ${height};
+                return layer * layer_n + idx;
+            }
+            """
+        ).substitute(resolution=resolution, width=width, height=height),
+        operation=string.Template(
+            """
+            U idx = p[i * pcl_channels[0]];
+            U valid = p[i * pcl_channels[0] + 1];
+            U inside = p[i * pcl_channels[0] + 2];
+            if (valid) {
+                if (inside) {
+                    // for every max value
+                    for ( W it=0;it<pcl_channels[2];it++){
+                        U feat = max_pt[i * pcl_channels[2] + it];
+                        W layer = max_id[i * pcl_channels[2] + it];
+                        atomicAdd(&newmap[get_map_idx(idx, layer)], feat);
+                    }
+                }
+            }
+            """
+        ).substitute(),
+        name="sum_max_kernel",
+    )
+    return sum_max_kernel
+
+
+def alpha_kernel(
+    resolution, width, height,
+):
+    # fuse only the most confident class per point: track the argmax over the semantic channels
+    alpha_kernel = cp.ElementwiseKernel(
+        in_params="raw U p, raw W pcl_chan, raw W map_lay, raw W pcl_channels",
+        out_params=" raw U newmap",
+        preamble=string.Template(
+            """
+            __device__ int get_map_idx(int idx, int layer_n) {
+                const int layer = ${width} * ${height};
+                return layer * layer_n + idx;
+            }
+            """
+        ).substitute(resolution=resolution, width=width, height=height),
+        operation=string.Template(
+            """
+            U idx = p[i * pcl_channels[0]];
+            U valid = p[i * pcl_channels[0] + 1];
+            U inside = p[i * pcl_channels[0] + 2];
+            if (valid) {
+                if (inside) {
+                    U theta_max = 0;
+                    W arg_max = 0;
+                    for ( W layer=0;layer<pcl_channels[1];layer++){
+                        U theta = p[i * pcl_channels[0] + pcl_chan[layer]];
+                        if (theta >=theta_max){
+                            arg_max = map_lay[layer];
+                            theta_max = theta;
+                        }
+                    }
+                    atomicAdd(&newmap[get_map_idx(idx, arg_max)], theta_max);
+                }
+            }
+            """
+        ).substitute(),
+        name="alpha_kernel",
+    )
+    return alpha_kernel
+
+
+def average_kernel(
+    width, height,
+):
+    average_kernel = cp.ElementwiseKernel(
+        in_params="raw V newmap, raw W pcl_chan, raw W map_lay, raw W pcl_channels, raw U new_elmap",
+        out_params="raw U map",
+        preamble=string.Template(
+            """
+            __device__ int get_map_idx(int idx, int layer_n) {
+                const int layer = ${width} * ${height};
+                return layer * layer_n + idx;
+            }
+            """
+        ).substitute(width=width, height=height),
+        operation=string.Template(
+            """
+            U id = floorf(i/pcl_channels[1]);
+            int layer = i % pcl_channels[1];
+            U cnt = new_elmap[get_map_idx(id, 2)];
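+            // cnt is the number of points that hit this cell in the current scan
+            // (layer 2 of the elevation newmap); cells with no hits keep their value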
+ if (cnt>0){ + U feat = newmap[get_map_idx(id, map_lay[layer])]/(1*cnt); + map[get_map_idx(id, map_lay[layer])] = feat; + } + """ + ).substitute(), + name="average_map_kernel", + ) + return average_kernel + + +def bayesian_inference_kernel( + width, height, +): + bayesian_inference_kernel = cp.ElementwiseKernel( + in_params=" raw W pcl_chan, raw W map_lay, raw W pcl_channels, raw U new_elmap", + out_params="raw U newmap, raw U sum_mean, raw U map", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + """ + ).substitute(width=width, height=height), + operation=string.Template( + """ + U id = floorf(i/pcl_channels[1]); + int layer = i % pcl_channels[1]; + U cnt = new_elmap[get_map_idx(id, 2)]; + if (cnt>0){ + U feat_ml = sum_mean[get_map_idx(id, layer)]/cnt; + U feat_old = map[get_map_idx(id, map_lay[layer])]; + U sigma_old = newmap[get_map_idx(id, map_lay[layer])]; + U sigma = 1.0; + U feat_new = sigma*feat_old /(cnt*sigma_old + sigma) +cnt*sigma_old *feat_ml /(cnt*sigma_old+sigma); + U sigma_new = sigma*sigma_old /(cnt*sigma_old +sigma); + map[get_map_idx(id, map_lay[layer])] = feat_new; + newmap[get_map_idx(id, map_lay[layer])] = sigma_new; + } + """ + ).substitute(), + name="bayesian_inference_kernel", + ) + return bayesian_inference_kernel + + +def class_average_kernel( + width, height, alpha, +): + class_average_kernel = cp.ElementwiseKernel( + in_params="raw V newmap, raw W pcl_chan, raw W map_lay, raw W pcl_channels, raw U new_elmap", + out_params="raw U map", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + """ + ).substitute(width=width, height=height), + operation=string.Template( + """ + U id = floorf(i/pcl_channels[1]); + int layer = i % pcl_channels[1]; + U cnt = new_elmap[get_map_idx(id, 2)]; + if (cnt>0){ + U prev_val = map[get_map_idx(id, map_lay[layer])]; + if (prev_val==0){ + U val = newmap[get_map_idx(id, map_lay[layer])]/(1*cnt); + map[get_map_idx(id, map_lay[layer])] = val; + } + else{ + U val = ${alpha} *prev_val + (1-${alpha}) * newmap[get_map_idx(id, map_lay[layer])]/(cnt); + map[get_map_idx(id, map_lay[layer])] = val; + } + } + """ + ).substitute(alpha=alpha,), + name="class_average_kernel", + ) + return class_average_kernel + + +def add_color_kernel( + width, height, +): + add_color_kernel = cp.ElementwiseKernel( + in_params="raw T p, raw U R, raw U t, raw W pcl_chan, raw W map_lay, raw W pcl_channels", + out_params="raw V color_map", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + __device__ unsigned int get_r(unsigned int color){ + unsigned int red = 0xFF0000; + unsigned int reds = (color & red) >> 16; + return reds; + } + __device__ unsigned int get_g(unsigned int color){ + unsigned int green = 0xFF00; + unsigned int greens = (color & green) >> 8; + return greens; + } + __device__ unsigned int get_b(unsigned int color){ + unsigned int blue = 0xFF; + unsigned int blues = ( color & blue); + return blues; + } + """ + ).substitute(width=width, height=height), + operation=string.Template( + """ + U id = floorf(i/pcl_channels[1]); + int layer = i % pcl_channels[1]; + U idx = p[id * pcl_channels[0]]; + U valid = p[id * pcl_channels[0] + 1]; + U inside = p[id * pcl_channels[0] + 2]; + if (valid && inside){ + unsigned int 
color = __float_as_uint(p[id * pcl_channels[0] + pcl_chan[layer]]);
+                atomicAdd(&color_map[get_map_idx(idx, layer*3)], get_r(color));
+                atomicAdd(&color_map[get_map_idx(idx, layer*3+1)], get_g(color));
+                atomicAdd(&color_map[get_map_idx(idx, layer*3 + 2)], get_b(color));
+                atomicAdd(&color_map[get_map_idx(idx, pcl_channels[1]*3)], 1);
+            }
+            """
+        ).substitute(width=width),
+        name="add_color_kernel",
+    )
+    return add_color_kernel
+
+
+def color_average_kernel(
+    width, height,
+):
+    color_average_kernel = cp.ElementwiseKernel(
+        in_params="raw V color_map, raw W pcl_chan, raw W map_lay, raw W pcl_channels",
+        out_params="raw U map",
+        preamble=string.Template(
+            """
+            __device__ int get_map_idx(int idx, int layer_n) {
+                const int layer = ${width} * ${height};
+                return layer * layer_n + idx;
+            }
+            __device__ unsigned int get_r(unsigned int color){
+                unsigned int red = 0xFF0000;
+                unsigned int reds = (color & red) >> 16;
+                return reds;
+            }
+            __device__ unsigned int get_g(unsigned int color){
+                unsigned int green = 0xFF00;
+                unsigned int greens = (color & green) >> 8;
+                return greens;
+            }
+            __device__ unsigned int get_b(unsigned int color){
+                unsigned int blue = 0xFF;
+                unsigned int blues = (color & blue);
+                return blues;
+            }
+            """
+        ).substitute(width=width, height=height),
+        operation=string.Template(
+            """
+            U id = floorf(i/pcl_channels[1]);
+            int layer = i % pcl_channels[1];
+            unsigned int cnt = color_map[get_map_idx(id, pcl_channels[1]*3)];
+            if (cnt>0){
+                // U prev_color = map[get_map_idx(id, map_lay[layer])];
+                unsigned int r = color_map[get_map_idx(id, layer*3)]/(1*cnt);
+                unsigned int g = color_map[get_map_idx(id, layer*3+1)]/(1*cnt);
+                unsigned int b = color_map[get_map_idx(id, layer*3+2)]/(1*cnt);
+                //if (prev_color>=0){
+                //    unsigned int prev_r = get_r(prev_color);
+                //    unsigned int prev_g = get_g(prev_color);
+                //    unsigned int prev_b = get_b(prev_color);
+                //    unsigned int r = prev_r/2 + color_map[get_map_idx(i, layer*3)]/(2*cnt);
+                //    unsigned int g = prev_g/2 + color_map[get_map_idx(i, layer*3+1)]/(2*cnt);
+                //    unsigned int b = prev_b/2 + color_map[get_map_idx(i, layer*3+2)]/(2*cnt);
+                //}
+                unsigned int rgb = (r<<16) + (g << 8) + b;
+                float rgb_ = __uint_as_float(rgb);
+                map[get_map_idx(id, map_lay[layer])] = rgb_;
+            }
+            """
+        ).substitute(),
+        name="color_average_kernel",
+    )
+    return color_average_kernel
diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/kernels/kk.py b/elevation_mapping_cupy/elevation_mapping_cupy/kernels/kk.py
new file mode 100644
index 00000000..27562dda
--- /dev/null
+++ b/elevation_mapping_cupy/elevation_mapping_cupy/kernels/kk.py
@@ -0,0 +1,1290 @@
+#
+# Copyright (c) 2023, Takahiro Miki. All rights reserved.
+# Licensed under the MIT license. See LICENSE file in the project root for details.
+#
+import cupy as cp
+import string
+
+
+def sum_kernel(
+    resolution, width, height,
+):
+    """Sums the semantic values of the classes for the exponential average or for the average.
+
+    Args:
+        resolution: map cell size [m].
+        width: map width in cells.
+        height: map height in cells.
+
+    Returns:
+
+    """
+    # input the list of layers, amount of channels can also be input through kernel
+    sum_kernel = cp.ElementwiseKernel(
+        in_params="raw U p, raw U R, raw U t, raw W pcl_chan, raw W map_lay, raw W pcl_channels",
+        out_params="raw U map, raw U newmap",
+        preamble=string.Template(
+            """
+            __device__ int get_map_idx(int idx, int layer_n) {
+                const int layer = ${width} * ${height};
+                return layer * layer_n + idx;
+            }
+            """
+        ).substitute(resolution=resolution, width=width, height=height),
+        operation=string.Template(
+            """
+            U id = floorf(i/pcl_channels[1]);
+            int layer = i % pcl_channels[1];
+            U idx = p[id * pcl_channels[0]];
+            U valid = p[id * pcl_channels[0] + 1];
+            U inside = p[id * pcl_channels[0] + 2];
+            if (valid) {
+                if (inside) {
+                    U feat = p[id * pcl_channels[0] + pcl_chan[layer]];
+                    atomicAdd(&newmap[get_map_idx(idx, map_lay[layer])], feat);
+                }
+            }
+            """
+        ).substitute(),
+        name="sum_kernel",
+    )
+    return sum_kernel
+
+
+def sum_compact_kernel(
+    resolution, width, height,
+):
+    # input the list of layers, amount of channels can also be input through kernel
+    sum_compact_kernel = cp.ElementwiseKernel(
+        in_params="raw U p, raw U R, raw U t, raw W pcl_chan, raw W map_lay, raw W pcl_channels",
+        out_params=" raw U newmap",
+        preamble=string.Template(
+            """
+            __device__ int get_map_idx(int idx, int layer_n) {
+                const int layer = ${width} * ${height};
+                return layer * layer_n + idx;
+            }
+            """
+        ).substitute(resolution=resolution, width=width, height=height),
+        operation=string.Template(
+            """
+            U id = floorf(i/pcl_channels[1]);
+            int layer = i % pcl_channels[1];
+            U idx = p[id * pcl_channels[0]];
+            U valid = p[id * pcl_channels[0] + 1];
+            U inside = p[id * pcl_channels[0] + 2];
+            if (valid) {
+                if (inside) {
+                    U feat = p[id * pcl_channels[0] + pcl_chan[layer]];
+                    atomicAdd(&newmap[get_map_idx(idx, layer)], feat);
+                }
+            }
+            """
+        ).substitute(),
+        name="sum_compact_kernel",
+    )
+    return sum_compact_kernel
+
+
+def sum_max_kernel(
+    resolution, width, height,
+):
+    # input the list of layers, amount of channels can also be input through kernel
+    sum_max_kernel = cp.ElementwiseKernel(
+        in_params="raw U p, raw U max_pt, raw T max_id, raw W pcl_chan, raw W map_lay, raw W pcl_channels",
+        out_params=" raw U newmap",
+        preamble=string.Template(
+            """
+            __device__ int get_map_idx(int idx, int layer_n) {
+                const int layer = ${width} * ${height};
+                return layer * layer_n + idx;
+            }
+            """
+        ).substitute(resolution=resolution, width=width, height=height),
+        operation=string.Template(
+            """
+            U idx = p[i * pcl_channels[0]];
+            U valid = p[i * pcl_channels[0] + 1];
+            U inside = p[i * pcl_channels[0] + 2];
+            if (valid) {
+                if (inside) {
+                    // for every max value
+                    for ( W it=0;it<pcl_channels[2];it++){
+                        U feat = max_pt[i * pcl_channels[2] + it];
+                        W layer = max_id[i * pcl_channels[2] + it];
+                        atomicAdd(&newmap[get_map_idx(idx, layer)], feat);
+                    }
+                }
+            }
+            """
+        ).substitute(),
+        name="sum_max_kernel",
+    )
+    return sum_max_kernel
+
+
+def alpha_kernel(
+    resolution, width, height,
+):
+    # fuse only the most confident class per point: track the argmax over the semantic channels
+    alpha_kernel = cp.ElementwiseKernel(
+        in_params="raw U p, raw W pcl_chan, raw W map_lay, raw W pcl_channels",
+        out_params=" raw U newmap",
+        preamble=string.Template(
+            """
+            __device__ int get_map_idx(int idx, int layer_n) {
+                const int layer = ${width} * ${height};
+                return layer * layer_n + idx;
+            }
+            """
+        ).substitute(resolution=resolution, width=width, height=height),
+        operation=string.Template(
+            """
+            U idx = p[i * pcl_channels[0]];
+            U valid = p[i * pcl_channels[0] + 1];
+            U inside = p[i * pcl_channels[0] + 2];
+            if (valid) {
+                if (inside) {
+                    U theta_max = 0;
+                    W arg_max = 0;
+                    for ( W layer=0;layer<pcl_channels[1];layer++){
+                        U theta = p[i * pcl_channels[0] + pcl_chan[layer]];
+                        if (theta >=theta_max){
+                            arg_max = map_lay[layer];
+                            theta_max = theta;
+                        }
+                    }
+                    atomicAdd(&newmap[get_map_idx(idx, arg_max)], theta_max);
+                }
+            }
+            """
+        ).substitute(),
+        name="alpha_kernel",
+    )
+    return alpha_kernel
+
+
+def average_kernel(
+    width, height,
+):
+    average_kernel = cp.ElementwiseKernel(
+        in_params="raw V newmap, raw W pcl_chan, raw W map_lay, raw W pcl_channels, raw U new_elmap",
+        out_params="raw U map",
+        preamble=string.Template(
+            """
+            __device__ int get_map_idx(int idx, int layer_n) {
+                const int layer = ${width} * ${height};
+                return layer * layer_n + idx;
+            }
+            """
+        ).substitute(width=width, height=height),
+        operation=string.Template(
+            """
+            U id = floorf(i/pcl_channels[1]);
+            int layer = i % pcl_channels[1];
+            U cnt = new_elmap[get_map_idx(id, 2)];
+ if (cnt>0){ + U feat = newmap[get_map_idx(id, map_lay[layer])]/(1*cnt); + map[get_map_idx(id, map_lay[layer])] = feat; + } + """ + ).substitute(), + name="average_map_kernel", + ) + return average_kernel + + +def bayesian_inference_kernel( + width, height, +): + bayesian_inference_kernel = cp.ElementwiseKernel( + in_params=" raw W pcl_chan, raw W map_lay, raw W pcl_channels, raw U new_elmap", + out_params="raw U newmap, raw U sum_mean, raw U map", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + """ + ).substitute(width=width, height=height), + operation=string.Template( + """ + U id = floorf(i/pcl_channels[1]); + int layer = i % pcl_channels[1]; + U cnt = new_elmap[get_map_idx(id, 2)]; + if (cnt>0){ + U feat_ml = sum_mean[get_map_idx(id, layer)]/cnt; + U feat_old = map[get_map_idx(id, map_lay[layer])]; + U sigma_old = newmap[get_map_idx(id, map_lay[layer])]; + U sigma = 1.0; + U feat_new = sigma*feat_old /(cnt*sigma_old + sigma) +cnt*sigma_old *feat_ml /(cnt*sigma_old+sigma); + U sigma_new = sigma*sigma_old /(cnt*sigma_old +sigma); + map[get_map_idx(id, map_lay[layer])] = feat_new; + newmap[get_map_idx(id, map_lay[layer])] = sigma_new; + } + """ + ).substitute(), + name="bayesian_inference_kernel", + ) + return bayesian_inference_kernel + + +def class_average_kernel( + width, height, alpha, +): + class_average_kernel = cp.ElementwiseKernel( + in_params="raw V newmap, raw W pcl_chan, raw W map_lay, raw W pcl_channels, raw U new_elmap", + out_params="raw U map", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + """ + ).substitute(width=width, height=height), + operation=string.Template( + """ + U id = floorf(i/pcl_channels[1]); + int layer = i % pcl_channels[1]; + U cnt = new_elmap[get_map_idx(id, 2)]; + if (cnt>0){ + U prev_val = map[get_map_idx(id, map_lay[layer])]; + if (prev_val==0){ + U val = newmap[get_map_idx(id, map_lay[layer])]/(1*cnt); + map[get_map_idx(id, map_lay[layer])] = val; + } + else{ + U val = ${alpha} *prev_val + (1-${alpha}) * newmap[get_map_idx(id, map_lay[layer])]/(cnt); + map[get_map_idx(id, map_lay[layer])] = val; + } + } + """ + ).substitute(alpha=alpha,), + name="class_average_kernel", + ) + return class_average_kernel + + +def add_color_kernel( + width, height, +): + add_color_kernel = cp.ElementwiseKernel( + in_params="raw T p, raw U R, raw U t, raw W pcl_chan, raw W map_lay, raw W pcl_channels", + out_params="raw V color_map", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + __device__ unsigned int get_r(unsigned int color){ + unsigned int red = 0xFF0000; + unsigned int reds = (color & red) >> 16; + return reds; + } + __device__ unsigned int get_g(unsigned int color){ + unsigned int green = 0xFF00; + unsigned int greens = (color & green) >> 8; + return greens; + } + __device__ unsigned int get_b(unsigned int color){ + unsigned int blue = 0xFF; + unsigned int blues = ( color & blue); + return blues; + } + """ + ).substitute(width=width, height=height), + operation=string.Template( + """ + U id = floorf(i/pcl_channels[1]); + int layer = i % pcl_channels[1]; + U idx = p[id * pcl_channels[0]]; + U valid = p[id * pcl_channels[0] + 1]; + U inside = p[id * pcl_channels[0] + 2]; + if (valid && inside){ + unsigned int 
color = __float_as_uint(p[id * pcl_channels[0] + pcl_chan[layer]]); + atomicAdd(&color_map[get_map_idx(idx, layer*3)], get_r(color)); + atomicAdd(&color_map[get_map_idx(idx, layer*3+1)], get_g(color)); + atomicAdd(&color_map[get_map_idx(idx, layer*3 + 2)], get_b(color)); + atomicAdd(&color_map[get_map_idx(idx, pcl_channels[1]*3)], 1); + } + """ + ).substitute(width=width), + name="add_color_kernel", + ) + return add_color_kernel + + +def color_average_kernel( + width, height, +): + color_average_kernel = cp.ElementwiseKernel( + in_params="raw V color_map, raw W pcl_chan, raw W map_lay, raw W pcl_channels", + out_params="raw U map", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + __device__ unsigned int get_r(unsigned int color){ + unsigned int red = 0xFF0000; + unsigned int reds = (color & red) >> 16; + return reds; + } + __device__ unsigned int get_g(unsigned int color){ + unsigned int green = 0xFF00; + unsigned int greens = (color & green) >> 8; + return greens; + } + __device__ unsigned int get_b(unsigned int color){ + unsigned int blue = 0xFF; + unsigned int blues = (color & blue); + return blues; + } + """ + ).substitute(width=width, height=height), + operation=string.Template( + """ + U id = floorf(i/pcl_channels[1]); + int layer = i % pcl_channels[1]; + unsigned int cnt = color_map[get_map_idx(id, pcl_channels[1]*3)]; + if (cnt>0){ + // U prev_color = map[get_map_idx(id, map_lay[layer])]; + unsigned int r = color_map[get_map_idx(id, layer*3)]/(1*cnt); + unsigned int g = color_map[get_map_idx(id, layer*3+1)]/(1*cnt); + unsigned int b = color_map[get_map_idx(id, layer*3+2)]/(1*cnt); + //if (prev_color>=0){ + // unsigned int prev_r = get_r(prev_color); + // unsigned int prev_g = get_g(prev_color); + // unsigned int prev_b = get_b(prev_color); + // unsigned int r = prev_r/2 + color_map[get_map_idx(i, layer*3)]/(2*cnt); + // unsigned int g = prev_g/2 + color_map[get_map_idx(i, layer*3+1)]/(2*cnt); + // unsigned int b = prev_b/2 + color_map[get_map_idx(i, layer*3+2)]/(2*cnt); + //} + unsigned int rgb = (r<<16) + (g << 8) + b; + float rgb_ = __uint_as_float(rgb); + map[get_map_idx(id, map_lay[layer])] = rgb_; + } + """ + ).substitute(), + name="color_average_kernel", + ) + return color_average_kernel + + + +def map_utils( + resolution, + width, + height, + sensor_noise_factor, + min_valid_distance, + max_height_range, + ramped_height_range_a, + ramped_height_range_b, + ramped_height_range_c, +): + util_preamble = string.Template( + """ + __device__ float16 clamp(float16 x, float16 min_x, float16 max_x) { + + return max(min(x, max_x), min_x); + } + __device__ int get_x_idx(float16 x, float16 center) { + int i = (x - center) / ${resolution} + 0.5 * ${width}; + return i; + } + __device__ int get_y_idx(float16 y, float16 center) { + int i = (y - center) / ${resolution} + 0.5 * ${height}; + return i; + } + __device__ bool is_inside(int idx) { + int idx_x = idx / ${width}; + int idx_y = idx % ${width}; + if (idx_x == 0 || idx_x == ${width} - 1) { + return false; + } + if (idx_y == 0 || idx_y == ${height} - 1) { + return false; + } + return true; + } + __device__ int get_idx(float16 x, float16 y, float16 center_x, float16 center_y) { + int idx_x = clamp(get_x_idx(x, center_x), 0, ${width} - 1); + int idx_y = clamp(get_y_idx(y, center_y), 0, ${height} - 1); + return ${width} * idx_x + idx_y; + } + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * 
${height}; + return layer * layer_n + idx; + } + __device__ float transform_p(float16 x, float16 y, float16 z, + float16 r0, float16 r1, float16 r2, float16 t) { + return r0 * x + r1 * y + r2 * z + t; + } + __device__ float z_noise(float16 z){ + return ${sensor_noise_factor} * z * z; + } + + __device__ float point_sensor_distance(float16 x, float16 y, float16 z, + float16 sx, float16 sy, float16 sz) { + float d = (x - sx) * (x - sx) + (y - sy) * (y - sy) + (z - sz) * (z - sz); + return d; + } + + __device__ bool is_valid(float16 x, float16 y, float16 z, + float16 sx, float16 sy, float16 sz) { + float d = point_sensor_distance(x, y, z, sx, sy, sz); + float dxy = max(sqrt(x * x + y * y) - ${ramped_height_range_b}, 0.0); + if (d < ${min_valid_distance} * ${min_valid_distance}) { + return false; + } + else if (z - sz > dxy * ${ramped_height_range_a} + ${ramped_height_range_c} || z - sz > ${max_height_range}) { + return false; + } + else { + return true; + } + } + + __device__ float ray_vector(float16 tx, float16 ty, float16 tz, + float16 px, float16 py, float16 pz, + float16& rx, float16& ry, float16& rz){ + float16 vx = px - tx; + float16 vy = py - ty; + float16 vz = pz - tz; + float16 norm = sqrt(vx * vx + vy * vy + vz * vz); + if (norm > 0) { + rx = vx / norm; + ry = vy / norm; + rz = vz / norm; + } + else { + rx = 0; + ry = 0; + rz = 0; + } + return norm; + } + + __device__ float inner_product(float16 x1, float16 y1, float16 z1, + float16 x2, float16 y2, float16 z2) { + + float product = (x1 * x2 + y1 * y2 + z1 * z2); + return product; + } + + """ + ).substitute( + resolution=resolution, + width=width, + height=height, + sensor_noise_factor=sensor_noise_factor, + min_valid_distance=min_valid_distance, + max_height_range=max_height_range, + ramped_height_range_a=ramped_height_range_a, + ramped_height_range_b=ramped_height_range_b, + ramped_height_range_c=ramped_height_range_c, + ) + return util_preamble + + +def add_points_kernel( + resolution, + width, + height, + sensor_noise_factor, + mahalanobis_thresh, + outlier_variance, + wall_num_thresh, + max_ray_length, + cleanup_step, + min_valid_distance, + max_height_range, + cleanup_cos_thresh, + ramped_height_range_a, + ramped_height_range_b, + ramped_height_range_c, + enable_edge_shaped=True, + enable_visibility_cleanup=True, +): + add_points_kernel = cp.ElementwiseKernel( + in_params="raw U center_x, raw U center_y, raw U R, raw U t, raw U norm_map", + out_params="raw U p, raw U map, raw T newmap", + preamble=map_utils( + resolution, + width, + height, + sensor_noise_factor, + min_valid_distance, + max_height_range, + ramped_height_range_a, + ramped_height_range_b, + ramped_height_range_c, + ), + operation=string.Template( + """ + U rx = p[i * 3]; + U ry = p[i * 3 + 1]; + U rz = p[i * 3 + 2]; + U x = transform_p(rx, ry, rz, R[0], R[1], R[2], t[0]); + U y = transform_p(rx, ry, rz, R[3], R[4], R[5], t[1]); + U z = transform_p(rx, ry, rz, R[6], R[7], R[8], t[2]); + U v = z_noise(rz); + int idx = get_idx(x, y, center_x[0], center_y[0]); + if (is_valid(x, y, z, t[0], t[1], t[2])) { + if (is_inside(idx)) { + U map_h = map[get_map_idx(idx, 0)]; + U map_v = map[get_map_idx(idx, 1)]; + U num_points = newmap[get_map_idx(idx, 4)]; + if (abs(map_h - z) > (map_v * ${mahalanobis_thresh})) { + atomicAdd(&map[get_map_idx(idx, 1)], ${outlier_variance}); + } + else { + if (${enable_edge_shaped} && (num_points > ${wall_num_thresh}) && (z < map_h - map_v * ${mahalanobis_thresh} / num_points)) { + // continue; + } + else { + T new_h = (map_h * v + z * map_v) / 
(map_v + v); + T new_v = (map_v * v) / (map_v + v); + atomicAdd(&newmap[get_map_idx(idx, 0)], new_h); + atomicAdd(&newmap[get_map_idx(idx, 1)], new_v); + atomicAdd(&newmap[get_map_idx(idx, 2)], 1.0); + // is Valid + map[get_map_idx(idx, 2)] = 1; + // Time layer + map[get_map_idx(idx, 4)] = 0.0; + // Upper bound + map[get_map_idx(idx, 5)] = new_h; + map[get_map_idx(idx, 6)] = 0.0; + } + // visibility cleanup + } + } + } + if (${enable_visibility_cleanup}) { + float16 ray_x, ray_y, ray_z; + float16 ray_length = ray_vector(t[0], t[1], t[2], x, y, z, ray_x, ray_y, ray_z); + ray_length = min(ray_length, (float16)${max_ray_length}); + int last_nidx = -1; + for (float16 s=${ray_step}; s < ray_length; s+=${ray_step}) { + // iterate through ray + U nx = t[0] + ray_x * s; + U ny = t[1] + ray_y * s; + U nz = t[2] + ray_z * s; + int nidx = get_idx(nx, ny, center_x[0], center_y[0]); + if (last_nidx == nidx) {continue;} // Skip if we're still in the same cell + else {last_nidx = nidx;} + if (!is_inside(nidx)) {continue;} + + U nmap_h = map[get_map_idx(nidx, 0)]; + U nmap_v = map[get_map_idx(nidx, 1)]; + U nmap_valid = map[get_map_idx(nidx, 2)]; + // traversability + U nmap_trav = map[get_map_idx(nidx, 3)]; + // Time layer + U non_updated_t = map[get_map_idx(nidx, 4)]; + // upper bound + U nmap_upper = map[get_map_idx(nidx, 5)]; + U nmap_is_upper = map[get_map_idx(nidx, 6)]; + + // If point is close or is farther away than ray length, skip. + float16 d = (x - nx) * (x - nx) + (y - ny) * (y - ny) + (z - nz) * (z - nz); + if (d < 0.1 || !is_valid(x, y, z, t[0], t[1], t[2])) {continue;} + + // If invalid, do upper bound check, then skip + if (nmap_valid < 0.5) { + if (nz < nmap_upper || nmap_is_upper < 0.5) { + map[get_map_idx(nidx, 5)] = nz; + map[get_map_idx(nidx, 6)] = 1.0f; + } + continue; + } + // If updated recently, skip + if (non_updated_t < 0.5) {continue;} + + if (nmap_h > nz + 0.01 - min(nmap_v, 1.0) * 0.05) { + // If ray and norm is vertical, skip + U norm_x = norm_map[get_map_idx(nidx, 0)]; + U norm_y = norm_map[get_map_idx(nidx, 1)]; + U norm_z = norm_map[get_map_idx(nidx, 2)]; + float product = inner_product(ray_x, ray_y, ray_z, norm_x, norm_y, norm_z); + if (fabs(product) < ${cleanup_cos_thresh}) {continue;} + U num_points = newmap[get_map_idx(nidx, 3)]; + if (num_points > ${wall_num_thresh} && non_updated_t < 1.0) {continue;} + + // Finally, this cell is penetrated by the ray. + atomicAdd(&map[get_map_idx(nidx, 2)], -${cleanup_step}/(ray_length / ${max_ray_length})); + atomicAdd(&map[get_map_idx(nidx, 1)], ${outlier_variance}); + // Do upper bound check. 
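+                    // The ray passed above this cell, so the true surface cannot be
+                    // higher than the ray sample; store nz as the new upper bound.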
+ if (nz < nmap_upper || nmap_is_upper < 0.5) { + map[get_map_idx(nidx, 5)] = nz; + map[get_map_idx(nidx, 6)] = 1.0f; + } + } + } + } + p[i * 3]= idx; + p[i * 3 + 1] = is_valid(x, y, z, t[0], t[1], t[2]); + p[i * 3 + 2] = is_inside(idx); + """ + ).substitute( + mahalanobis_thresh=mahalanobis_thresh, + outlier_variance=outlier_variance, + wall_num_thresh=wall_num_thresh, + ray_step=resolution / 2 ** 0.5, + max_ray_length=max_ray_length, + cleanup_step=cleanup_step, + cleanup_cos_thresh=cleanup_cos_thresh, + enable_edge_shaped=int(enable_edge_shaped), + enable_visibility_cleanup=int(enable_visibility_cleanup), + ), + name="add_points_kernel", + ) + return add_points_kernel + + +def error_counting_kernel( + resolution, + width, + height, + sensor_noise_factor, + mahalanobis_thresh, + outlier_variance, + traversability_inlier, + min_valid_distance, + max_height_range, + ramped_height_range_a, + ramped_height_range_b, + ramped_height_range_c, +): + error_counting_kernel = cp.ElementwiseKernel( + in_params="raw U map, raw U p, raw U center_x, raw U center_y, raw U R, raw U t", + out_params="raw U newmap, raw T error, raw T error_cnt", + preamble=map_utils( + resolution, + width, + height, + sensor_noise_factor, + min_valid_distance, + max_height_range, + ramped_height_range_a, + ramped_height_range_b, + ramped_height_range_c, + ), + operation=string.Template( + """ + U rx = p[i * 3]; + U ry = p[i * 3 + 1]; + U rz = p[i * 3 + 2]; + U x = transform_p(rx, ry, rz, R[0], R[1], R[2], t[0]); + U y = transform_p(rx, ry, rz, R[3], R[4], R[5], t[1]); + U z = transform_p(rx, ry, rz, R[6], R[7], R[8], t[2]); + U v = z_noise(rz); + // if (!is_valid(z, t[2])) {return;} + if (!is_valid(x, y, z, t[0], t[1], t[2])) {return;} + // if ((x - t[0]) * (x - t[0]) + (y - t[1]) * (y - t[1]) + (z - t[2]) * (z - t[2]) < 0.5) {return;} + int idx = get_idx(x, y, center_x[0], center_y[0]); + if (!is_inside(idx)) { + return; + } + U map_h = map[get_map_idx(idx, 0)]; + U map_v = map[get_map_idx(idx, 1)]; + U map_valid = map[get_map_idx(idx, 2)]; + U map_t = map[get_map_idx(idx, 3)]; + if (map_valid > 0.5 && (abs(map_h - z) < (map_v * ${mahalanobis_thresh})) + && map_v < ${outlier_variance} / 2.0 + && map_t > ${traversability_inlier}) { + T e = z - map_h; + atomicAdd(&error[0], e); + atomicAdd(&error_cnt[0], 1); + atomicAdd(&newmap[get_map_idx(idx, 3)], 1.0); + } + atomicAdd(&newmap[get_map_idx(idx, 4)], 1.0); + """ + ).substitute( + mahalanobis_thresh=mahalanobis_thresh, + outlier_variance=outlier_variance, + traversability_inlier=traversability_inlier, + ), + name="error_counting_kernel", + ) + return error_counting_kernel + + +def average_map_kernel(width, height, max_variance, initial_variance): + average_map_kernel = cp.ElementwiseKernel( + in_params="raw U newmap", + out_params="raw U map", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + """ + ).substitute(width=width, height=height), + operation=string.Template( + """ + U h = map[get_map_idx(i, 0)]; + U v = map[get_map_idx(i, 1)]; + U valid = map[get_map_idx(i, 2)]; + U new_h = newmap[get_map_idx(i, 0)]; + U new_v = newmap[get_map_idx(i, 1)]; + U new_cnt = newmap[get_map_idx(i, 2)]; + if (new_cnt > 0) { + if (new_v / new_cnt > ${max_variance}) { + map[get_map_idx(i, 0)] = 0; + map[get_map_idx(i, 1)] = ${initial_variance}; + map[get_map_idx(i, 2)] = 0; + } + else { + map[get_map_idx(i, 0)] = new_h / new_cnt; + map[get_map_idx(i, 1)] = new_v / new_cnt; + 
map[get_map_idx(i, 2)] = 1; + } + } + if (valid < 0.5) { + map[get_map_idx(i, 0)] = 0; + map[get_map_idx(i, 1)] = ${initial_variance}; + map[get_map_idx(i, 2)] = 0; + } + """ + ).substitute(max_variance=max_variance, initial_variance=initial_variance), + name="average_map_kernel", + ) + return average_map_kernel + + +def dilation_filter_kernel(width, height, dilation_size): + dilation_filter_kernel = cp.ElementwiseKernel( + in_params="raw U map, raw U mask", + out_params="raw U newmap, raw U newmask", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + + __device__ int get_relative_map_idx(int idx, int dx, int dy, int layer_n) { + const int layer = ${width} * ${height}; + const int relative_idx = idx + ${width} * dy + dx; + return layer * layer_n + relative_idx; + } + __device__ bool is_inside(int idx) { + int idx_x = idx / ${width}; + int idx_y = idx % ${width}; + if (idx_x <= 0 || idx_x >= ${width} - 1) { + return false; + } + if (idx_y <= 0 || idx_y >= ${height} - 1) { + return false; + } + return true; + } + """ + ).substitute(width=width, height=height), + operation=string.Template( + """ + U h = map[get_map_idx(i, 0)]; + U valid = mask[get_map_idx(i, 0)]; + newmap[get_map_idx(i, 0)] = h; + if (valid < 0.5) { + U distance = 100; + U near_value = 0; + for (int dy = -${dilation_size}; dy <= ${dilation_size}; dy++) { + for (int dx = -${dilation_size}; dx <= ${dilation_size}; dx++) { + int idx = get_relative_map_idx(i, dx, dy, 0); + if (!is_inside(idx)) {continue;} + U valid = mask[idx]; + if(valid > 0.5 && dx + dy < distance) { + distance = dx + dy; + near_value = map[idx]; + } + } + } + if(distance < 100) { + newmap[get_map_idx(i, 0)] = near_value; + newmask[get_map_idx(i, 0)] = 1.0; + } + } + """ + ).substitute(dilation_size=dilation_size), + name="dilation_filter_kernel", + ) + return dilation_filter_kernel + + +def normal_filter_kernel(width, height, resolution): + normal_filter_kernel = cp.ElementwiseKernel( + in_params="raw U map, raw U mask", + out_params="raw U newmap", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + + __device__ int get_relative_map_idx(int idx, int dx, int dy, int layer_n) { + const int layer = ${width} * ${height}; + const int relative_idx = idx + ${width} * dy + dx; + return layer * layer_n + relative_idx; + } + __device__ bool is_inside(int idx) { + int idx_x = idx / ${width}; + int idx_y = idx % ${width}; + if (idx_x <= 0 || idx_x >= ${width} - 1) { + return false; + } + if (idx_y <= 0 || idx_y >= ${height} - 1) { + return false; + } + return true; + } + __device__ float resolution() { + return ${resolution}; + } + """ + ).substitute(width=width, height=height, resolution=resolution), + operation=string.Template( + """ + U h = map[get_map_idx(i, 0)]; + U valid = mask[get_map_idx(i, 0)]; + if (valid > 0.5) { + int idx_x = get_relative_map_idx(i, 1, 0, 0); + int idx_y = get_relative_map_idx(i, 0, 1, 0); + if (!is_inside(idx_x) || !is_inside(idx_y)) { return; } + float dzdx = (map[idx_x] - h); + float dzdy = (map[idx_y] - h); + float nx = -dzdy / resolution(); + float ny = -dzdx / resolution(); + float nz = 1; + float norm = sqrt((nx * nx) + (ny * ny) + 1); + newmap[get_map_idx(i, 0)] = nx / norm; + newmap[get_map_idx(i, 1)] = ny / norm; + newmap[get_map_idx(i, 2)] = nz / norm; + } + """ + ).substitute(), + 
name="normal_filter_kernel", + ) + return normal_filter_kernel + + +def polygon_mask_kernel(width, height, resolution): + polygon_mask_kernel = cp.ElementwiseKernel( + in_params="raw U polygon, raw U center_x, raw U center_y, raw int16 polygon_n, raw U polygon_bbox", + out_params="raw U mask", + preamble=string.Template( + """ + __device__ struct Point + { + int x; + int y; + }; + // Given three colinear points p, q, r, the function checks if + // point q lies on line segment 'pr' + __device__ bool onSegment(Point p, Point q, Point r) + { + if (q.x <= max(p.x, r.x) && q.x >= min(p.x, r.x) && + q.y <= max(p.y, r.y) && q.y >= min(p.y, r.y)) + return true; + return false; + } + // To find orientation of ordered triplet (p, q, r). + // The function returns following values + // 0 --> p, q and r are colinear + // 1 --> Clockwise + // 2 --> Counterclockwise + __device__ int orientation(Point p, Point q, Point r) + { + int val = (q.y - p.y) * (r.x - q.x) - + (q.x - p.x) * (r.y - q.y); + if (val == 0) return 0; // colinear + return (val > 0)? 1: 2; // clock or counterclock wise + } + // The function that returns true if line segment 'p1q1' + // and 'p2q2' intersect. + __device__ bool doIntersect(Point p1, Point q1, Point p2, Point q2) + { + // Find the four orientations needed for general and + // special cases + int o1 = orientation(p1, q1, p2); + int o2 = orientation(p1, q1, q2); + int o3 = orientation(p2, q2, p1); + int o4 = orientation(p2, q2, q1); + // General case + if (o1 != o2 && o3 != o4) + return true; + // Special Cases + // p1, q1 and p2 are colinear and p2 lies on segment p1q1 + if (o1 == 0 && onSegment(p1, p2, q1)) return true; + // p1, q1 and p2 are colinear and q2 lies on segment p1q1 + if (o2 == 0 && onSegment(p1, q2, q1)) return true; + // p2, q2 and p1 are colinear and p1 lies on segment p2q2 + if (o3 == 0 && onSegment(p2, p1, q2)) return true; + // p2, q2 and q1 are colinear and q1 lies on segment p2q2 + if (o4 == 0 && onSegment(p2, q1, q2)) return true; + return false; // Doesn't fall in any of the above cases + } + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + + __device__ int get_idx_x(int idx){ + int idx_x = idx / ${width}; + return idx_x; + } + + __device__ int get_idx_y(int idx){ + int idx_y = idx % ${width}; + return idx_y; + } + + __device__ float16 clamp(float16 x, float16 min_x, float16 max_x) { + + return max(min(x, max_x), min_x); + } + __device__ float16 round(float16 x) { + return (int)x + (int)(2 * (x - (int)x)); + } + __device__ int get_x_idx(float16 x, float16 center) { + const float resolution = ${resolution}; + const float width = ${width}; + int i = (x - center) / resolution + 0.5 * width; + return i; + } + __device__ int get_y_idx(float16 y, float16 center) { + const float resolution = ${resolution}; + const float height = ${height}; + int i = (y - center) / resolution + 0.5 * height; + return i; + } + __device__ int get_idx(float16 x, float16 y, float16 center_x, float16 center_y) { + int idx_x = clamp(get_x_idx(x, center_x), 0, ${width} - 1); + int idx_y = clamp(get_y_idx(y, center_y), 0, ${height} - 1); + return ${width} * idx_x + idx_y; + } + + """ + ).substitute(width=width, height=height, resolution=resolution), + operation=string.Template( + """ + // Point p = {get_idx_x(i, center_x[0]), get_idx_y(i, center_y[0])}; + Point p = {get_idx_x(i), get_idx_y(i)}; + Point extreme = {100000, p.y}; + int bbox_min_idx = get_idx(polygon_bbox[0], polygon_bbox[1], center_x[0], 
center_y[0]); + int bbox_max_idx = get_idx(polygon_bbox[2], polygon_bbox[3], center_x[0], center_y[0]); + Point bmin = {get_idx_x(bbox_min_idx), get_idx_y(bbox_min_idx)}; + Point bmax = {get_idx_x(bbox_max_idx), get_idx_y(bbox_max_idx)}; + if (p.x < bmin.x || p.x > bmax.x || p.y < bmin.y || p.y > bmax.y){ + mask[i] = 0; + return; + } + else { + int intersect_cnt = 0; + for (int j = 0; j < polygon_n[0]; j++) { + Point p1, p2; + int i1 = get_idx(polygon[j * 2 + 0], polygon[j * 2 + 1], center_x[0], center_y[0]); + p1.x = get_idx_x(i1); + p1.y = get_idx_y(i1); + int j2 = (j + 1) % polygon_n[0]; + int i2 = get_idx(polygon[j2 * 2 + 0], polygon[j2 * 2 + 1], center_x[0], center_y[0]); + p2.x = get_idx_x(i2); + p2.y = get_idx_y(i2); + if (doIntersect(p1, p2, p, extreme)) + { + // If the point 'p' is colinear with line segment 'i-next', + // then check if it lies on segment. If it lies, return true, + // otherwise false + if (orientation(p1, p, p2) == 0) { + if (onSegment(p1, p, p2)){ + mask[i] = 1; + return; + } + } + else if(((p1.y <= p.y) && (p2.y > p.y)) || ((p1.y > p.y) && (p2.y <= p.y))){ + intersect_cnt++; + } + } + } + if (intersect_cnt % 2 == 0) { mask[i] = 0; } + else { mask[i] = 1; } + } + """ + ).substitute(a=1), + name="polygon_mask_kernel", + ) + return polygon_mask_kernel + + + +def image_to_map_correspondence_kernel(resolution, width, height, tolerance_z_collision): + """ + This function calculates the correspondence between the image and the map. + It takes in the resolution, width, height, and tolerance_z_collision as parameters. + The function returns a kernel that can be used to perform the correspondence calculation. + """ + _image_to_map_correspondence_kernel = cp.ElementwiseKernel( + in_params="raw U map, raw U x1, raw U y1, raw U z1, raw U P, raw U K, raw U D, raw U image_height, raw U image_width, raw U center", + out_params="raw U uv_correspondence, raw B valid_correspondence", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + __device__ bool is_inside_map(int x, int y) { + return (x >= 0 && y >= 0 && x<${width} && y<${height}); + } + __device__ float get_l2_distance(int x0, int y0, int x1, int y1) { + float dx = x0-x1; + float dy = y0-y1; + return sqrt( dx*dx + dy*dy); + } + """ + ).substitute(width=width, height=height, resolution=resolution), + operation=string.Template( + """ + int cell_idx = get_map_idx(i, 0); + + // return if gridcell has no valid height + if (map[get_map_idx(i, 2)] != 1){ + return; + } + + // get current cell position + int y0 = i % ${width}; + int x0 = i / ${width}; + + // gridcell 3D point in worldframe TODO reverse x and y + float p1 = (x0-(${width}/2)) * ${resolution} + center[0]; + float p2 = (y0-(${height}/2)) * ${resolution} + center[1]; + float p3 = map[cell_idx] + center[2]; + + // reproject 3D point into image plane + float u = p1 * P[0] + p2 * P[1] + p3 * P[2] + P[3]; + float v = p1 * P[4] + p2 * P[5] + p3 * P[6] + P[7]; + float d = p1 * P[8] + p2 * P[9] + p3 * P[10] + P[11]; + + // filter point behind image plane + if (d <= 0) { + return; + } + u = u/d; + v = v/d; + + // Check if D is all zeros + bool is_D_zero = (D[0] == 0 && D[1] == 0 && D[2] == 0 && D[3] == 0 && D[4] == 0); + + // Apply undistortion using distortion matrix D if not all zeros + if (!is_D_zero) { + float k1 = D[0]; + float k2 = D[1]; + float p1 = D[2]; + float p2 = D[3]; + float k3 = D[4]; + float fx = K[0]; + float fy = K[4]; + float cx = K[2]; + float cy 
= K[5]; + float x = (u - cx) / fx; + float y = (v - cy) / fy; + float r2 = x * x + y * y; + float radial_distortion = 1 + k1 * r2 + k2 * r2 * r2 + k3 * r2 * r2 * r2; + float u_corrected = x * radial_distortion + 2 * p1 * x * y + p2 * (r2 + 2 * x * x); + float v_corrected = y * radial_distortion + 2 * p2 * x * y + p1 * (r2 + 2 * y * y); + u = fx * u_corrected + cx; + v = fy * v_corrected + cy; + } + + // filter point next to image plane + if ((u < 0) || (v < 0) || (u >= image_width) || (v >= image_height)){ + return; + } + + int y0_c = y0; + int x0_c = x0; + float total_dis = get_l2_distance(x0_c, y0_c, x1,y1); + float z0 = map[cell_idx]; + float delta_z = z1-z0; + + + // bresenham algorithm to iterate over cells in line between camera center and current gridmap cell + // https://en.wikipedia.org/wiki/Bresenham%27s_line_algorithm + int dx = abs(x1-x0); + int sx = x0 < x1 ? 1 : -1; + int dy = -abs(y1 - y0); + int sy = y0 < y1 ? 1 : -1; + int error = dx + dy; + + bool is_valid = true; + + // iterate over all cells along line + while (1){ + // assumption we do not need to check the height for camera center cell + if (x0 == x1 && y0 == y1){ + break; + } + + // check if height is invalid + if (is_inside_map(x0,y0)){ + int idx = y0 + (x0 * ${width}); + if (map[get_map_idx(idx, 2)]){ + float dis = get_l2_distance(x0_c, y0_c, x0, y0); + float rayheight = z0 + ( dis / total_dis * delta_z); + if ( map[idx] - ${tolerance_z_collision} > rayheight){ + is_valid = false; + break; + } + } + } + + + // computation of next gridcell index in line + int e2 = 2 * error; + if (e2 >= dy){ + if(x0 == x1){ + break; + } + error = error + dy; + x0 = x0 + sx; + } + if (e2 <= dx){ + if (y0 == y1){ + break; + } + error = error + dx; + y0 = y0 + sy; + } + } + + // mark the correspondence + uv_correspondence[get_map_idx(i, 0)] = u; + uv_correspondence[get_map_idx(i, 1)] = v; + valid_correspondence[get_map_idx(i, 0)] = is_valid; + """ + ).substitute(height=height, width=width, resolution=resolution, tolerance_z_collision=tolerance_z_collision), + name="image_to_map_correspondence_kernel", + ) + return _image_to_map_correspondence_kernel + + +def average_correspondences_to_map_kernel(width, height): + """ + This function calculates the average correspondences to the map. + It takes in the width and height as parameters. + The function returns a kernel that can be used to perform the correspondence calculation. 
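+
+    Note: for every cell with a valid correspondence this kernel overwrites the target
+    layer with the image value; cells without a valid correspondence keep their previous
+    sem_map value, so no running average is maintained despite the name.
+
+    Example (an illustrative sketch, not part of the API; all kernel parameters are raw
+    CuPy arrays, so the explicit `size` argument is required when launching the kernel):
+
+        kernel = average_correspondences_to_map_kernel(width, height)
+        kernel(sem_map, map_idx, image_mono, uv_correspondence, valid_correspondence,
+               image_height, image_width, new_sem_map, size=int(width * height))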
+ """ + _average_correspondences_to_map_kernel = cp.ElementwiseKernel( + in_params="raw U sem_map, raw U map_idx, raw U image_mono, raw U uv_correspondence, raw B valid_correspondence, raw U image_height, raw U image_width", + out_params="raw U new_sem_map", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + """ + ).substitute(width=width, height=height), + operation=string.Template( + """ + int cell_idx = get_map_idx(i, 0); + if (valid_correspondence[cell_idx]){ + int cell_idx_2 = get_map_idx(i, 1); + int idx = int(uv_correspondence[cell_idx]) + int(uv_correspondence[cell_idx_2]) * image_width; + new_sem_map[get_map_idx(i, map_idx)] = image_mono[idx]; + }else{ + new_sem_map[get_map_idx(i, map_idx)] = sem_map[get_map_idx(i, map_idx)]; + } + + """ + ).substitute(), + name="average_correspondences_to_map_kernel", + ) + return _average_correspondences_to_map_kernel + + +def exponential_correspondences_to_map_kernel(width, height, alpha): + """ + This function calculates the exponential correspondences to the map. + It takes in the width, height, and alpha as parameters. + The function returns a kernel that can be used to perform the correspondence calculation. + """ + _exponential_correspondences_to_map_kernel = cp.ElementwiseKernel( + in_params="raw U sem_map, raw U map_idx, raw U image_mono, raw U uv_correspondence, raw B valid_correspondence, raw U image_height, raw U image_width", + out_params="raw U new_sem_map", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + """ + ).substitute(width=width, height=height), + operation=string.Template( + """ + int cell_idx = get_map_idx(i, 0); + if (valid_correspondence[cell_idx]){ + int cell_idx_2 = get_map_idx(i, 1); + int idx = int(uv_correspondence[cell_idx]) + int(uv_correspondence[cell_idx_2]) * image_width; + new_sem_map[get_map_idx(i, map_idx)] = sem_map[get_map_idx(i, map_idx)] * (1-${alpha}) + ${alpha} * image_mono[idx]; + }else{ + new_sem_map[get_map_idx(i, map_idx)] = sem_map[get_map_idx(i, map_idx)]; + } + + """ + ).substitute(alpha=alpha), + name="exponential_correspondences_to_map_kernel", + ) + return _exponential_correspondences_to_map_kernel + + +def color_correspondences_to_map_kernel(width, height): + """ + This function calculates the color correspondences to the map. + It takes in the width and height as parameters. + The function returns a kernel that can be used to perform the correspondence calculation. 
+ """ + _color_correspondences_to_map_kernel = cp.ElementwiseKernel( + in_params="raw U sem_map, raw U map_idx, raw U image_rgb, raw U uv_correspondence, raw B valid_correspondence, raw U image_height, raw U image_width", + out_params="raw U new_sem_map", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + """ + ).substitute(width=width, height=height), + operation=string.Template( + """ + int cell_idx = get_map_idx(i, 0); + if (valid_correspondence[cell_idx]){ + int cell_idx_2 = get_map_idx(i, 1); + + int idx_red = int(uv_correspondence[cell_idx]) + int(uv_correspondence[cell_idx_2]) * image_width; + int idx_green = image_width * image_height + idx_red; + int idx_blue = image_width * image_height * 2 + idx_red; + + unsigned int r = image_rgb[idx_red]; + unsigned int g = image_rgb[idx_green]; + unsigned int b = image_rgb[idx_blue]; + + unsigned int rgb = (r<<16) + (g << 8) + b; + float rgb_ = __uint_as_float(rgb); + new_sem_map[get_map_idx(i, map_idx)] = rgb_; + }else{ + new_sem_map[get_map_idx(i, map_idx)] = sem_map[get_map_idx(i, map_idx)]; + } + """ + ).substitute(), + name="color_correspondences_to_map_kernel", + ) + return _color_correspondences_to_map_kernel diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/listener_test.py b/elevation_mapping_cupy/elevation_mapping_cupy/listener_test.py new file mode 100644 index 00000000..212d13f4 --- /dev/null +++ b/elevation_mapping_cupy/elevation_mapping_cupy/listener_test.py @@ -0,0 +1,33 @@ +import rclpy +from rclpy.node import Node +import tf2_ros +from geometry_msgs.msg import TransformStamped +from tf2_msgs.msg import TFMessage + +class TfListener(Node): + def __init__(self): + super().__init__('tf_listener') + self.subscription = self.create_subscription( + TFMessage, + '/tf', + self.listener_callback, + 10) + self.last_time = None + + def listener_callback(self, msg): + for transform in msg.transforms: + current_time = transform.header.stamp.sec + transform.header.stamp.nanosec * 1e-9 + if self.last_time is not None: + delta = current_time - self.last_time + self.get_logger().info(f"Delta time: {delta:.6f} seconds") + self.last_time = current_time + +def main(args=None): + rclpy.init(args=args) + node = TfListener() + rclpy.spin(node) + node.destroy_node() + rclpy.shutdown() + +if __name__ == '__main__': + main() diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/map_initializer.py b/elevation_mapping_cupy/elevation_mapping_cupy/map_initializer.py new file mode 100644 index 00000000..0139ea76 --- /dev/null +++ b/elevation_mapping_cupy/elevation_mapping_cupy/map_initializer.py @@ -0,0 +1,80 @@ +# +# Copyright (c) 2022, Takahiro Miki. All rights reserved. +# Licensed under the MIT license. See LICENSE file in the project root for details. 
+# +from scipy.interpolate import griddata +import numpy as np +import cupy as cp + + +class MapInitializer(object): + def __init__(self, initial_variance, new_variance, xp=np, method="points"): + self.methods = ["points"] + assert method in self.methods, "method should be chosen from {}".format(self.methods) + self.method = method + self.xp = xp + self.initial_variance = initial_variance + self.new_variance = new_variance + + def __call__(self, *args, **kwargs): + if self.method == "points": + self.points_initializer(*args, **kwargs) + else: + return + + def points_initializer(self, elevation_map, points, method="linear"): + """Initialize the map using interpolation between given points + + Args: + elevation_map (cupy._core.core.ndarray): elevation_map data. + points (cupy._core.core.ndarray): points used to interpolate. + method (str): method for interpolation. (nearest, linear, cubic) + + """ + # points from existing map. + points_idx = self.xp.where(elevation_map[2] > 0.5) + values = elevation_map[0, points_idx[0], points_idx[1]] + + # Add external points for interpolation. + points_idx = self.xp.stack(points_idx).T + points_idx = self.xp.vstack([points_idx, points[:, :2]]) + values = self.xp.hstack([values, points[:, 2]]) + + assert points_idx.shape[0] > 3, "Initialization points must be more than 3." + + # Interpolation using griddata function. + w = elevation_map.shape[1] + h = elevation_map.shape[2] + grid_x, grid_y = np.mgrid[0:w, 0:h] + if self.xp == cp: + points_idx = cp.asnumpy(points_idx) + values = cp.asnumpy(values) + interpolated = griddata(points_idx, values, (grid_x, grid_y), method=method) + if self.xp == cp: + interpolated = cp.asarray(interpolated) + + # Update elevation map. + elevation_map[0] = self.xp.nan_to_num(interpolated) + elevation_map[1] = self.xp.where( + self.xp.invert(self.xp.isnan(interpolated)), self.new_variance, self.initial_variance + ) + elevation_map[2] = self.xp.where(self.xp.invert(self.xp.isnan(interpolated)), 1.0, 0.0) + return + + +if __name__ == "__main__": + initializer = MapInitializer(100, 10, method="points", xp=cp) + m = np.zeros((4, 10, 10)) + m[0, 0:5, 2:5] = 0.3 + m[2, 0:5, 2:5] = 1.0 + np.set_printoptions(threshold=100) + print(m[0]) + print(m[1]) + print(m[2]) + points = cp.array([[0, 0, 0.2], [8, 0, 0.2], [6, 9, 0.2]]) + # [3, 3, 0.2]]) + m = cp.asarray(m) + initializer(m, points, method="cubic") + print(m[0]) + print(m[1]) + print(m[2]) diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/parameter.py b/elevation_mapping_cupy/elevation_mapping_cupy/parameter.py new file mode 100644 index 00000000..059fbb4c --- /dev/null +++ b/elevation_mapping_cupy/elevation_mapping_cupy/parameter.py @@ -0,0 +1,293 @@ +# +# Copyright (c) 2022, Takahiro Miki. All rights reserved. +# Licensed under the MIT license. See LICENSE file in the project root for details. +# +from dataclasses import dataclass, field +import pickle +import numpy as np +from simple_parsing.helpers import Serializable +from dataclasses import field +from typing import Tuple + + +@dataclass +class Parameter(Serializable): + """ + This class holds the parameters for the elevation mapping algorithm. + + Attributes: + resolution: The resolution in meters. + (Default: ``0.04``) + subscriber_cfg: The configuration for the subscriber. + (Default: ``{ "front_cam": { "channels": ["rgb", "person"], "topic_name": "/elevation_mapping/pointcloud_semantic", "data_type": "pointcloud", } }``) + additional_layers: The additional layers for the map. 
+ (Default: ``["color"]``) + fusion_algorithms: The list of fusion algorithms. + (Default: ``[ "image_color", "image_exponential", "pointcloud_average", "pointcloud_bayesian_inference", "pointcloud_class_average", "pointcloud_class_bayesian", "pointcloud_class_max", "pointcloud_color", ]``) + pointcloud_channel_fusions: The fusion for pointcloud channels. + (Default: ``{"rgb": "color", "default": "class_average"}``) + image_channel_fusions: The fusion for image channels. + (Default: ``{"rgb": "color", "default": "exponential"}``) + data_type: The data type for the map. + (Default: ``np.float32``) + average_weight: The weight for the average fusion. + (Default: ``0.5``) + map_length: The map's size in meters. + (Default: ``8.0``) + sensor_noise_factor: The point's noise is sensor_noise_factor*z^2 (z is distance from sensor). + (Default: ``0.05``) + mahalanobis_thresh: Points outside this distance is outlier. + (Default: ``2.0``) + outlier_variance: If point is outlier, add this value to the cell. + (Default: ``0.01``) + drift_compensation_variance_inlier: Cells under this value is used for drift compensation. + (Default: ``0.1``) + time_variance: Add this value when update_variance is called. + (Default: ``0.01``) + time_interval: Time layer is updated with this interval. + (Default: ``0.1``) + max_variance: The maximum variance for each cell. + (Default: ``1.0``) + dilation_size: The dilation filter size before traversability filter. + (Default: ``2``) + dilation_size_initialize: The dilation size after the init. + (Default: ``10``) + drift_compensation_alpha: The drift compensation alpha for smoother update of drift compensation. + (Default: ``1.0``) + traversability_inlier: Cells with higher traversability are used for drift compensation. + (Default: ``0.1``) + wall_num_thresh: If there are more points than this value, only higher points than the current height are used to make the wall more sharp. + (Default: ``100``) + min_height_drift_cnt: Drift compensation only happens if the valid cells are more than this number. + (Default: ``100``) + max_ray_length: The maximum length for ray tracing. + (Default: ``2.0``) + cleanup_step: Substitute this value from validity layer at visibility cleanup. + (Default: ``0.01``) + cleanup_cos_thresh: Substitute this value from validity layer at visibility cleanup. + (Default: ``0.5``) + min_valid_distance: Points with shorter distance will be filtered out. + (Default: ``0.3``) + max_height_range: Points higher than this value from sensor will be filtered out to disable ceiling. + (Default: ``1.0``) + ramped_height_range_a: If z > max(d - ramped_height_range_b, 0) * ramped_height_range_a + ramped_height_range_c, reject. + (Default: ``0.3``) + ramped_height_range_b: If z > max(d - ramped_height_range_b, 0) * ramped_height_range_a + ramped_height_range_c, reject. + (Default: ``1.0``) + ramped_height_range_c: If z > max(d - ramped_height_range_b, 0) * ramped_height_range_a + ramped_height_range_c, reject. + (Default: ``0.2``) + safe_thresh: If traversability is smaller, it is counted as unsafe cell. + (Default: ``0.5``) + safe_min_thresh: Polygon is unsafe if there exists lower traversability than this. + (Default: ``0.5``) + max_unsafe_n: If the number of cells under safe_thresh exceeds this value, polygon is unsafe. + (Default: ``20``) + checker_layer: Layer used for checking safety. + (Default: ``"traversability"``) + max_drift: The maximum drift for the compensation. 
+ (Default: ``0.10``) + overlap_clear_range_xy: XY range [m] for clearing overlapped area. This defines the valid area for overlap clearance. (used for multi floor setting) + (Default: ``4.0``) + overlap_clear_range_z: Z range [m] for clearing overlapped area. Cells outside this range will be cleared. (used for multi floor setting) + (Default: ``2.0``) + enable_edge_sharpen: Enable edge sharpening. + (Default: ``True``) + enable_drift_compensation: Enable drift compensation. + (Default: ``True``) + enable_visibility_cleanup: Enable visibility cleanup. + (Default: ``True``) + enable_overlap_clearance: Enable overlap clearance. + (Default: ``True``) + use_only_above_for_upper_bound: Use only above for upper bound. + (Default: ``True``) + use_chainer: Use chainer as a backend of traversability filter or pytorch. If false, it uses pytorch. Pytorch requires ~2GB more GPU memory compared to chainer but runs faster. + (Default: ``True``) + position_noise_thresh: If the position change is bigger than this value, the drift compensation happens. + (Default: ``0.1``) + orientation_noise_thresh: If the orientation change is bigger than this value, the drift compensation happens. + (Default: ``0.1``) + plugin_config_file: Configuration file for the plugin. + (Default: ``"config/plugin_config.yaml"``) + weight_file: Weight file for traversability filter. + (Default: ``"config/weights.dat"``) + initial_variance: Initial variance for each cell. + (Default: ``10.0``) + initialized_variance: Initialized variance for each cell. + (Default: ``10.0``) + w1: Weights for the first layer. + (Default: ``np.zeros((4, 1, 3, 3))``) + w2: Weights for the second layer. + (Default: ``np.zeros((4, 1, 3, 3))``) + w3: Weights for the third layer. + (Default: ``np.zeros((4, 1, 3, 3))``) + w_out: Weights for the output layer. + (Default: ``np.zeros((1, 12, 1, 1))``) + true_map_length: True length of the map. + (Default: ``None``) + cell_n: Number of cells in the map. + (Default: ``None``) + true_cell_n: True number of cells in the map. + (Default: ``None``) + + """ + resolution: float = 0.04 # resolution in m. + subscriber_cfg: dict = field( + default_factory=lambda: { + "front_cam": { + "channels": ["rgb", "person"], + "topic_name": "/elevation_mapping/pointcloud_semantic", + "data_type": "pointcloud", + } + } + ) # configuration for the subscriber + additional_layers: list = field(default_factory=lambda: ["color"]) # additional layers for the map + fusion_algorithms: list = field( + default_factory=lambda: [ + "image_color", + "image_exponential", + "pointcloud_average", + "pointcloud_bayesian_inference", + "pointcloud_class_average", + "pointcloud_class_bayesian", + "pointcloud_class_max", + "pointcloud_color", + ] + ) # list of fusion algorithms + pointcloud_channel_fusions: dict = field(default_factory=lambda: {"rgb": "color", "default": "class_average"}) # fusion for pointcloud channels + image_channel_fusions: dict = field(default_factory=lambda: {"rgb": "color", "default": "exponential"}) # fusion for image channels + data_type: str = np.float32 # data type for the map + average_weight: float = 0.5 # weight for the average fusion + + map_length: float = 8.0 # map's size in m. + sensor_noise_factor: float = 0.05 # point's noise is sensor_noise_factor*z^2 (z is distance from sensor). + mahalanobis_thresh: float = 2.0 # points outside this distance is outlier. + outlier_variance: float = 0.01 # if point is outlier, add this value to the cell. 
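+    # NOTE (explanatory comment): a point is treated as an outlier when its Mahalanobis
+    # distance to the cell estimate exceeds mahalanobis_thresh; instead of fusing such a
+    # point, outlier_variance is added to the cell's variance so the cell can recover later.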
+ drift_compensation_variance_inlier: float = 0.1 # cells under this value is used for drift compensation. + time_variance: float = 0.01 # add this value when update_variance is called. + time_interval: float = 0.1 # Time layer is updated with this interval. + + max_variance: float = 1.0 # maximum variance for each cell. + dilation_size: int = 2 # dilation filter size before traversability filter. + dilation_size_initialize: int = 10 # dilation size after the init. + drift_compensation_alpha: float = 1.0 # drift compensation alpha for smoother update of drift compensation. + + traversability_inlier: float = 0.1 # cells with higher traversability are used for drift compensation. + wall_num_thresh: int = 100 # if there are more points than this value, only higher points than the current height are used to make the wall more sharp. + min_height_drift_cnt: int = 100 # drift compensation only happens if the valid cells are more than this number. + + max_ray_length: float = 2.0 # maximum length for ray tracing. + cleanup_step: float = 0.01 # substitute this value from validity layer at visibility cleanup. + cleanup_cos_thresh: float = 0.5 # substitute this value from validity layer at visibility cleanup. + min_valid_distance: float = 0.3 # points with shorter distance will be filtered out. + max_height_range: float = 1.0 # points higher than this value from sensor will be filtered out to disable ceiling. + ramped_height_range_a: float = 0.3 # if z > max(d - ramped_height_range_b, 0) * ramped_height_range_a + ramped_height_range_c, reject. + ramped_height_range_b: float = 1.0 # if z > max(d - ramped_height_range_b, 0) * ramped_height_range_a + ramped_height_range_c, reject. + ramped_height_range_c: float = 0.2 # if z > max(d - ramped_height_range_b, 0) * ramped_height_range_a + ramped_height_range_c, reject. + + safe_thresh: float = 0.5 # if traversability is smaller, it is counted as unsafe cell. + safe_min_thresh: float = 0.5 # polygon is unsafe if there exists lower traversability than this. + max_unsafe_n: int = 20 # if the number of cells under safe_thresh exceeds this value, polygon is unsafe. + checker_layer: str = "traversability" # layer used for checking safety + + max_drift: float = 0.10 # maximum drift for the compensation + + overlap_clear_range_xy: float = 4.0 # xy range [m] for clearing overlapped area. this defines the valid area for overlap clearance. (used for multi floor setting) + overlap_clear_range_z: float = 2.0 # z range [m] for clearing overlapped area. cells outside this range will be cleared. (used for multi floor setting) + + enable_edge_sharpen: bool = True # enable edge sharpening + enable_drift_compensation: bool = True # enable drift compensation + enable_visibility_cleanup: bool = True # enable visibility cleanup + enable_overlap_clearance: bool = True # enable overlap clearance + use_only_above_for_upper_bound: bool = True # use only above for upper bound + use_chainer: bool = True # use chainer as a backend of traversability filter or pytorch. If false, it uses pytorch. pytorch requires ~2GB more GPU memory compared to chainer but runs faster. + position_noise_thresh: float = 0.1 # if the position change is bigger than this value, the drift compensation happens. + orientation_noise_thresh: float = 0.1 # if the orientation change is bigger than this value, the drift compensation happens. 
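+    # NOTE (explanatory comment): drift compensation runs only when the robot pose changes
+    # by more than position_noise_thresh or orientation_noise_thresh, and the applied
+    # correction is limited by max_drift per update.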
+ + plugin_config_file: str = "config/plugin_config.yaml" # configuration file for the plugin + weight_file: str = "config/weights.dat" # weight file for traversability filter + + initial_variance: float = 10.0 # initial variance for each cell. + initialized_variance: float = 10.0 # initialized variance for each cell. + w1: np.ndarray = field(default_factory=lambda: np.zeros((4, 1, 3, 3))) # weights for the first layer + w2: np.ndarray = field(default_factory=lambda: np.zeros((4, 1, 3, 3))) # weights for the second layer + w3: np.ndarray = field(default_factory=lambda: np.zeros((4, 1, 3, 3))) # weights for the third layer + w_out: np.ndarray = field(default_factory=lambda: np.zeros((1, 12, 1, 1))) # weights for the output layer + + # # not configurable params + true_map_length: float = None # true length of the map + cell_n: int = None # number of cells in the map + true_cell_n: int = None # true number of cells in the map + + def load_weights(self, filename: str): + """ + Load weights from a file into the model's parameters. + + Args: + filename (str): The path to the file containing the weights. + """ + with open(filename, "rb") as file: + weights = pickle.load(file) + self.w1 = weights["conv1.weight"] + self.w2 = weights["conv2.weight"] + self.w3 = weights["conv3.weight"] + self.w_out = weights["conv_final.weight"] + + def get_names(self): + """ + Get the names of the parameters. + + Returns: + list: A list of parameter names. + """ + return list(self.__annotations__.keys()) + + def get_types(self): + """ + Get the types of the parameters. + + Returns: + list: A list of parameter types. + """ + return [v.__name__ for v in self.__annotations__.values()] + + def set_value(self, name, value): + """ + Set the value of a parameter. + + Args: + name (str): The name of the parameter. + value (any): The new value for the parameter. + """ + setattr(self, name, value) + + def get_value(self, name): + """ + Get the value of a parameter. + + Args: + name (str): The name of the parameter. + + Returns: + any: The value of the parameter. + """ + return getattr(self, name) + + def update(self): + """ + Update the parameters related to the map size and resolution. + """ + # +2 is a border for outside map + self.cell_n = int(round(self.map_length / self.resolution)) + 2 + self.true_cell_n = round(self.map_length / self.resolution) + self.true_map_length = self.true_cell_n * self.resolution + + +if __name__ == "__main__": + param = Parameter() + print(param) + print(param.resolution) + param.set_value("resolution", 0.1) + print(param.resolution) + + print("names ", param.get_names()) + print("types ", param.get_types()) diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/plugins/__init__.py b/elevation_mapping_cupy/elevation_mapping_cupy/plugins/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/plugins/erosion.py b/elevation_mapping_cupy/elevation_mapping_cupy/plugins/erosion.py new file mode 100644 index 00000000..7b0211df --- /dev/null +++ b/elevation_mapping_cupy/elevation_mapping_cupy/plugins/erosion.py @@ -0,0 +1,113 @@ +# +# Copyright (c) 2024, Takahiro Miki. All rights reserved. +# Licensed under the MIT license. See LICENSE file in the project root for details. +# +import cv2 as cv +import cupy as cp +import numpy as np + +from typing import List + +from .plugin_manager import PluginBase + + +class Erosion(PluginBase): + """ + This class is used for applying erosion to an elevation map or specific layers within it. 
+ Erosion is a morphological operation that is used to remove small-scale details from a binary image. + + Args: + kernel_size (int): Size of the erosion kernel. Default is 3, which means a 3x3 square kernel. + iterations (int): Number of times erosion is applied. Default is 1. + **kwargs (): Additional keyword arguments. + """ + + def __init__( + self, + input_layer_name="traversability", + kernel_size: int = 3, + iterations: int = 1, + reverse: bool = False, + default_layer_name: str = "traversability", + **kwargs, + ): + super().__init__() + self.input_layer_name = input_layer_name + self.kernel_size = kernel_size + self.iterations = iterations + self.reverse = reverse + self.default_layer_name = default_layer_name + + def __call__( + self, + elevation_map: cp.ndarray, + layer_names: List[str], + plugin_layers: cp.ndarray, + plugin_layer_names: List[str], + semantic_map: cp.ndarray, + semantic_layer_names: List[str], + *args, + ) -> cp.ndarray: + """ + Applies erosion to the given elevation map. + + Args: + elevation_map (cupy._core.core.ndarray): The elevation map to be eroded. + layer_names (List[str]): Names of the layers in the elevation map. + plugin_layers (cupy._core.core.ndarray): Layers provided by other plugins. + plugin_layer_names (List[str]): Names of the layers provided by other plugins. + *args (): Additional arguments. + + Returns: + cupy._core.core.ndarray: The eroded elevation map. + """ + # Convert the elevation map to a format suitable for erosion (if necessary) + layer_data = self.get_layer_data( + elevation_map, + layer_names, + plugin_layers, + plugin_layer_names, + semantic_map, + semantic_layer_names, + self.input_layer_name, + ) + if layer_data is None: + print(f"No layers are found, using {self.default_layer_name}!") + layer_data = self.get_layer_data( + elevation_map, + layer_names, + plugin_layers, + plugin_layer_names, + semantic_map, + semantic_layer_names, + self.default_layer_name, + ) + if layer_data is None: + print(f"No layers are found, using traversability!") + layer_data = self.get_layer_data( + elevation_map, + layer_names, + plugin_layers, + plugin_layer_names, + semantic_map, + semantic_layer_names, + "traversability", + ) + layer_np = cp.asnumpy(layer_data) + + # Define the erosion kernel + kernel = np.ones((self.kernel_size, self.kernel_size), np.uint8) + + if self.reverse: + layer_np = 1 - layer_np + # Apply erosion + layer_min = float(layer_np.min()) + layer_max = float(layer_np.max()) + layer_np_normalized = ((layer_np - layer_min) * 255 / (layer_max - layer_min)).astype("uint8") + eroded_map_np = cv.erode(layer_np_normalized, kernel, iterations=self.iterations) + eroded_map_np = eroded_map_np.astype(np.float32) * (layer_max - layer_min) / 255 + layer_min + if self.reverse: + eroded_map_np = 1 - eroded_map_np + + # Convert back to cupy array and return + return cp.asarray(eroded_map_np) diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/plugins/features_pca.py b/elevation_mapping_cupy/elevation_mapping_cupy/plugins/features_pca.py new file mode 100644 index 00000000..94f88823 --- /dev/null +++ b/elevation_mapping_cupy/elevation_mapping_cupy/plugins/features_pca.py @@ -0,0 +1,96 @@ +# +# Copyright (c) 2022, Takahiro Miki. All rights reserved. +# Licensed under the MIT license. See LICENSE file in the project root for details. 
+# +import cupy as cp +import numpy as np +from typing import List +import re + +from elevation_mapping_cupy.plugins.plugin_manager import PluginBase +from sklearn.decomposition import PCA + + +class FeaturesPca(PluginBase): + """This is a filter to create a pca layer of the semantic features in the map. + + Args: + cell_n (int): width and height of the elevation map. + classes (ruamel.yaml.comments.CommentedSeq): + **kwargs (): + """ + + def __init__( + self, cell_n: int = 100, process_layer_names: List[str] = [], **kwargs, + ): + super().__init__() + self.process_layer_names = process_layer_names + + def get_layer_indices(self, layer_names: List[str]) -> List[int]: + """ Get the indices of the layers that are to be processed using regular expressions. + Args: + layer_names (List[str]): List of layer names. + Returns: + List[int]: List of layer indices. + """ + indices = [] + for i, layer_name in enumerate(layer_names): + if any(re.match(pattern, layer_name) for pattern in self.process_layer_names): + indices.append(i) + return indices + + def __call__( + self, + elevation_map: cp.ndarray, + layer_names: List[str], + plugin_layers: cp.ndarray, + plugin_layer_names: List[str], + semantic_map: cp.ndarray, + semantic_layer_names: List[str], + *args, + ) -> cp.ndarray: + """ + + Args: + elevation_map (cupy._core.core.ndarray): + layer_names (List[str]): + plugin_layers (cupy._core.core.ndarray): + plugin_layer_names (List[str]): + semantic_map (elevation_mapping_cupy.semantic_map.SemanticMap): + *args (): + + Returns: + cupy._core.core.ndarray: + """ + # get indices of all layers that contain semantic features information + data = [] + for m, layer_names in zip( + [elevation_map, plugin_layers, semantic_map], [layer_names, plugin_layer_names, semantic_layer_names] + ): + layer_indices = self.get_layer_indices(layer_names) + if len(layer_indices) > 0: + n_c = m[layer_indices].shape[1] + data_i = cp.reshape(m[layer_indices], (len(layer_indices), -1)).T.get() + data_i = np.clip(data_i, -1, 1) + data.append(data_i) + if len(data) > 0: + data = np.concatenate(data, axis=1) + # check which has the highest value + n_components = 3 + pca = PCA(n_components=n_components).fit(data) + pca_descriptors = pca.transform(data) + img_pca = pca_descriptors.reshape(n_c, n_c, n_components) + comp = img_pca # [:, :, -3:] + comp_min = comp.min(axis=(0, 1)) + comp_max = comp.max(axis=(0, 1)) + if (comp_max - comp_min).any() != 0: + comp_img = np.divide((comp - comp_min), (comp_max - comp_min)) + pca_map = (comp_img * 255).astype(np.uint8) + r = np.asarray(pca_map[:, :, 0], dtype=np.uint32) + g = np.asarray(pca_map[:, :, 1], dtype=np.uint32) + b = np.asarray(pca_map[:, :, 2], dtype=np.uint32) + rgb_arr = np.array((r << 16) | (g << 8) | (b << 0), dtype=np.uint32) + rgb_arr.dtype = np.float32 + return cp.asarray(rgb_arr) + else: + return cp.zeros_like(elevation_map[0]) diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/plugins/inpainting.py b/elevation_mapping_cupy/elevation_mapping_cupy/plugins/inpainting.py new file mode 100644 index 00000000..3e926b00 --- /dev/null +++ b/elevation_mapping_cupy/elevation_mapping_cupy/plugins/inpainting.py @@ -0,0 +1,63 @@ +# +# Copyright (c) 2022, Takahiro Miki. All rights reserved. +# Licensed under the MIT license. See LICENSE file in the project root for details. 
+# +import cupy as cp +from typing import List +import cupyx.scipy.ndimage as ndimage +import numpy as np +import cv2 as cv + +from .plugin_manager import PluginBase + + +class Inpainting(PluginBase): + """ + This class is used for inpainting, a process of reconstructing lost or deteriorated parts of images and videos. + + Args: + cell_n (int): The number of cells. Default is 100. + method (str): The inpainting method. Options are 'telea' or 'ns' (Navier-Stokes). Default is 'telea'. + **kwargs (): Additional keyword arguments. + """ + + def __init__(self, cell_n: int = 100, method: str = "telea", **kwargs): + super().__init__() + if method == "telea": + self.method = cv.INPAINT_TELEA + elif method == "ns": # Navier-Stokes + self.method = cv.INPAINT_NS + else: # default method + self.method = cv.INPAINT_TELEA + + def __call__( + self, + elevation_map: cp.ndarray, + layer_names: List[str], + plugin_layers: cp.ndarray, + plugin_layer_names: List[str], + *args, + ) -> cp.ndarray: + """ + + Args: + elevation_map (cupy._core.core.ndarray): + layer_names (List[str]): + plugin_layers (cupy._core.core.ndarray): + plugin_layer_names (List[str]): + *args (): + + Returns: + cupy._core.core.ndarray: + """ + mask = cp.asnumpy((elevation_map[2] < 0.5).astype("uint8")) + if (mask < 1).any(): + h = elevation_map[0] + h_max = float(h[mask < 1].max()) + h_min = float(h[mask < 1].min()) + h = cp.asnumpy((elevation_map[0] - h_min) * 255 / (h_max - h_min)).astype("uint8") + dst = np.array(cv.inpaint(h, mask, 1, self.method)) + h_inpainted = dst.astype(np.float32) * (h_max - h_min) / 255 + h_min + return cp.asarray(h_inpainted).astype(np.float64) + else: + return elevation_map[0] diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/plugins/max_filter.py b/elevation_mapping_cupy/elevation_mapping_cupy/plugins/max_filter.py new file mode 100644 index 00000000..865470b9 --- /dev/null +++ b/elevation_mapping_cupy/elevation_mapping_cupy/plugins/max_filter.py @@ -0,0 +1,111 @@ +# +# Copyright (c) 2022, Takahiro Miki. All rights reserved. +# Licensed under the MIT license. See LICENSE file in the project root for details. +# +import cupy as cp +import string +from typing import List + +from .plugin_manager import PluginBase + + +class MaxFilter(PluginBase): + """This is a filter to fill in invalid cells with maximum values around. + + ... + + Attributes + ---------- + width: int + width of the elevation map. + height: int + height of the elevation map. + dilation_size: int + The size of the patch to search for maximum value for each iteration. + iteration_n: int + The number of iteration to repeat the same filter. 
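+
+    Example (an illustrative sketch; the map layout follows the layer convention
+    documented in PluginBase, with layer 2 holding the validity mask):
+
+        max_filter = MaxFilter(cell_n=200, dilation_size=5, iteration_n=5)
+        filled = max_filter(elevation_map, layer_names, plugin_layers, plugin_layer_names)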
+ """ + + def __init__(self, cell_n: int = 100, dilation_size: int = 5, iteration_n: int = 5, **kwargs): + super().__init__() + self.iteration_n = iteration_n + self.width = cell_n + self.height = cell_n + self.max_filtered = cp.zeros((self.width, self.height)) + self.max_filtered_mask = cp.zeros((self.width, self.height)) + self.max_filter_kernel = cp.ElementwiseKernel( + in_params="raw U map, raw U mask", + out_params="raw U newmap, raw U newmask", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + + __device__ int get_relative_map_idx(int idx, int dx, int dy, int layer_n) { + const int layer = ${width} * ${height}; + const int relative_idx = idx + ${width} * dy + dx; + return layer * layer_n + relative_idx; + } + __device__ bool is_inside(int idx) { + int idx_x = idx / ${width}; + int idx_y = idx % ${width}; + if (idx_x <= 0 || idx_x >= ${width} - 1) { + return false; + } + if (idx_y <= 0 || idx_y >= ${height} - 1) { + return false; + } + return true; + } + """ + ).substitute(width=self.width, height=self.height), + operation=string.Template( + """ + U valid = mask[get_map_idx(i, 0)]; + if (valid < 0.5) { + U max_value = -1000000.0; + for (int dy = -${dilation_size}; dy <= ${dilation_size}; dy++) { + for (int dx = -${dilation_size}; dx <= ${dilation_size}; dx++) { + int idx = get_relative_map_idx(i, dx, dy, 0); + if (!is_inside(idx)) {continue;} + U valid = mask[idx]; + U value = map[idx]; + if(valid > 0.5 && value > max_value) { + max_value = value; + } + } + } + if (max_value > -1000000 + 1) { + newmap[get_map_idx(i, 0)] = max_value; + newmask[get_map_idx(i, 0)] = 0.6; + } + } + """ + ).substitute(dilation_size=dilation_size), + name="max_filter_kernel", + ) + + def __call__( + self, + elevation_map: cp.ndarray, + layer_names: List[str], + plugin_layers: cp.ndarray, + plugin_layer_names: List[str], + ) -> cp.ndarray: + self.max_filtered = elevation_map[0].copy() + self.max_filtered_mask = elevation_map[2].copy() + for i in range(self.iteration_n): + self.max_filter_kernel( + self.max_filtered.copy(), + self.max_filtered_mask.copy(), + self.max_filtered, + self.max_filtered_mask, + size=(self.width * self.height), + ) + # If there's no more mask, break + if (self.max_filtered_mask > 0.5).all(): + break + max_filtered = cp.where(self.max_filtered_mask > 0.5, self.max_filtered.copy(), cp.nan) + return max_filtered \ No newline at end of file diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/plugins/max_layer_filter.py b/elevation_mapping_cupy/elevation_mapping_cupy/plugins/max_layer_filter.py new file mode 100644 index 00000000..33bc069a --- /dev/null +++ b/elevation_mapping_cupy/elevation_mapping_cupy/plugins/max_layer_filter.py @@ -0,0 +1,108 @@ +# +# Copyright (c) 2022, Takahiro Miki. All rights reserved. +# Licensed under the MIT license. See LICENSE file in the project root for details. +# +import cupy as cp +import numpy as np +from typing import List + +from elevation_mapping_cupy.plugins.plugin_manager import PluginBase + + +class MaxLayerFilter(PluginBase): + """Applies a maximum filter to the input layers and updates the traversability map. + This can be used to enhance navigation by identifying traversable areas. + + Args: + cell_n (int): The width and height of the elevation map. + reverse (list): A list of boolean values indicating whether to reverse the filter operation for each layer. Default is [True]. 
+ min_or_max (str): A string indicating whether to apply a minimum or maximum filter. Accepts "min" or "max". Default is "max". + layers (list): List of layers for semantic traversability. Default is ["traversability"]. + thresholds (list): List of thresholds for each layer. If the value is bigger than a threshold, assign 1.0 otherwise 0.0. If it is False, it does not apply. Default is [False]. + **kwargs: Additional keyword arguments. + """ + + def __init__( + self, + cell_n: int = 100, + layers: list = ["traversability"], + reverse: list = [True], + min_or_max: str = "max", + thresholds: list = [False], + scales: list = [1.0], + default_value: float = 0.0, + **kwargs, + ): + super().__init__() + self.layers = layers + self.reverse = reverse + self.min_or_max = min_or_max + self.thresholds = thresholds + self.scales = scales + self.default_value = default_value + + def __call__( + self, + elevation_map: cp.ndarray, + layer_names: List[str], + plugin_layers: cp.ndarray, + plugin_layer_names: List[str], + semantic_map: cp.ndarray, + semantic_layer_names: List[str], + *args, + ) -> cp.ndarray: + """ + + Args: + elevation_map (cupy._core.core.ndarray): + layer_names (List[str]): + plugin_layers (cupy._core.core.ndarray): + plugin_layer_names (List[str]): + semantic_map (elevation_mapping_cupy.semantic_map.SemanticMap): + *args (): + + Returns: + cupy._core.core.ndarray: + """ + layers = [] + for it, name in enumerate(self.layers): + layer = self.get_layer_data( + elevation_map, layer_names, plugin_layers, plugin_layer_names, semantic_map, semantic_layer_names, name + ) + if layer is None: + continue + if isinstance(self.default_value, float): + layer = cp.where(layer == 0.0, float(self.default_value), layer) + elif isinstance(self.default_value, str): + default_layer = self.get_layer_data( + elevation_map, + layer_names, + plugin_layers, + plugin_layer_names, + semantic_map, + semantic_layer_names, + self.default_value, + ) + layer = cp.where(layer == 0, default_layer, layer) + if self.reverse[it]: + layer = 1.0 - layer + if len(self.scales) > it and isinstance(self.scales[it], float): + layer = layer * float(self.scales[it]) + if isinstance(self.thresholds[it], float): + layer = cp.where(layer > float(self.thresholds[it]), 1, 0) + layers.append(layer) + if len(layers) == 0: + print("No layers are found, returning traversability!") + if isinstance(self.default_value, float): + layer = cp.ones_like(elevation_map[0]) + layer *= float(self.default_value) + return layer + else: + idx = layer_names.index("traversability") + return elevation_map[idx] + result = cp.stack(layers, axis=0) + if self.min_or_max == "min": + result = cp.min(result, axis=0) + else: + result = cp.max(result, axis=0) + return result diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/plugins/min_filter.py b/elevation_mapping_cupy/elevation_mapping_cupy/plugins/min_filter.py new file mode 100644 index 00000000..ebf4300a --- /dev/null +++ b/elevation_mapping_cupy/elevation_mapping_cupy/plugins/min_filter.py @@ -0,0 +1,118 @@ +# +# Copyright (c) 2022, Takahiro Miki. All rights reserved. +# Licensed under the MIT license. See LICENSE file in the project root for details. +# +import cupy as cp +import string +from typing import List + +from .plugin_manager import PluginBase + + +class MinFilter(PluginBase): + """This is a filter to fill in invalid cells with minimum values around. + + Args: + cell_n (int): width of the elevation map. 
+ dilation_size (int): The size of the patch to search for minimum value for each iteration. + iteration_n (int): The number of iteration to repeat the same filter. + **kwargs (): + """ + + def __init__(self, cell_n: int = 100, dilation_size: int = 5, iteration_n: int = 5, **kwargs): + super().__init__() + self.iteration_n = iteration_n + self.width = cell_n + self.height = cell_n + self.min_filtered = cp.zeros((self.width, self.height)) + self.min_filtered_mask = cp.zeros((self.width, self.height)) + self.min_filter_kernel = cp.ElementwiseKernel( + in_params="raw U map, raw U mask", + out_params="raw U newmap, raw U newmask", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + + __device__ int get_relative_map_idx(int idx, int dx, int dy, int layer_n) { + const int layer = ${width} * ${height}; + const int relative_idx = idx + ${width} * dy + dx; + return layer * layer_n + relative_idx; + } + __device__ bool is_inside(int idx) { + int idx_x = idx / ${width}; + int idx_y = idx % ${width}; + if (idx_x <= 0 || idx_x >= ${width} - 1) { + return false; + } + if (idx_y <= 0 || idx_y >= ${height} - 1) { + return false; + } + return true; + } + """ + ).substitute(width=self.width, height=self.height), + operation=string.Template( + """ + U h = map[get_map_idx(i, 0)]; + U valid = mask[get_map_idx(i, 0)]; + if (valid < 0.5) { + U min_value = 1000000.0; + for (int dy = -${dilation_size}; dy <= ${dilation_size}; dy++) { + for (int dx = -${dilation_size}; dx <= ${dilation_size}; dx++) { + int idx = get_relative_map_idx(i, dx, dy, 0); + if (!is_inside(idx)) {continue;} + U valid = newmask[idx]; + U value = newmap[idx]; + if(valid > 0.5 && value < min_value) { + min_value = value; + } + } + } + if (min_value < 1000000 - 1) { + newmap[get_map_idx(i, 0)] = min_value; + newmask[get_map_idx(i, 0)] = 0.6; + } + } + """ + ).substitute(dilation_size=dilation_size), + name="min_filter_kernel", + ) + + def __call__( + self, + elevation_map: cp.ndarray, + layer_names: List[str], + plugin_layers: cp.ndarray, + plugin_layer_names: List[str], + *args, + ) -> cp.ndarray: + """ + + Args: + elevation_map (cupy._core.core.ndarray): + layer_names (List[str]): + plugin_layers (cupy._core.core.ndarray): + plugin_layer_names (List[str]): + *args (): + + Returns: + cupy._core.core.ndarray: + """ + self.min_filtered = elevation_map[0].copy() + self.min_filtered_mask = elevation_map[2].copy() + for i in range(self.iteration_n): + self.min_filter_kernel( + elevation_map[0], + elevation_map[2], + self.min_filtered, + self.min_filtered_mask, + size=(self.width * self.height), + ) + # If there's no more mask, break + if (self.min_filtered_mask > 0.5).all(): + break + min_filtered = cp.where(self.min_filtered_mask > 0.5, self.min_filtered.copy(), cp.nan) + return min_filtered diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/plugins/plugin_manager.py b/elevation_mapping_cupy/elevation_mapping_cupy/plugins/plugin_manager.py new file mode 100644 index 00000000..048ca016 --- /dev/null +++ b/elevation_mapping_cupy/elevation_mapping_cupy/plugins/plugin_manager.py @@ -0,0 +1,267 @@ +# +# Copyright (c) 2022, Takahiro Miki. All rights reserved. +# Licensed under the MIT license. See LICENSE file in the project root for details. 
+# +from abc import ABC +import cupy as cp +from typing import List, Dict, Optional +import importlib +import inspect +from dataclasses import dataclass +from ruamel.yaml import YAML +from inspect import signature + + +@dataclass +class PluginParams: + name: str + layer_name: str + fill_nan: bool = False # fill nan to invalid region + is_height_layer: bool = False # if this is a height layer + + +class PluginBase(ABC): + """ + This is a base class of Plugins + """ + + def __init__(self, *args, **kwargs): + """ + + Args: + plugin_params : PluginParams + The parameter of callback + """ + + def __call__( + self, + elevation_map: cp.ndarray, + layer_names: List[str], + plugin_layers: cp.ndarray, + plugin_layer_names: List[str], + semantic_map: cp.ndarray, + semantic_layer_names: List[str], + *args, + **kwargs, + ) -> cp.ndarray: + """This gets the elevation map data and plugin layers as a cupy array. + + + Args: + elevation_map (): + layer_names (): + plugin_layers (): + plugin_layer_names (): + semantic_map (): + semantic_layer_names (): + + Run your processing here and return the result. + layer of elevation_map 0: elevation + 1: variance + 2: is_valid + 3: traversability + 4: time + 5: upper_bound + 6: is_upper_bound + You can also access to the other plugins' layer with plugin_layers and plugin_layer_names + + """ + pass + + def get_layer_data( + self, + elevation_map: cp.ndarray, + layer_names: List[str], + plugin_layers: cp.ndarray, + plugin_layer_names: List[str], + semantic_map: cp.ndarray, + semantic_layer_names: List[str], + name: str, + ) -> Optional[cp.ndarray]: + """ + Retrieve a copy of the layer data from the elevation, plugin, or semantic maps based on the layer name. + + Args: + elevation_map (cp.ndarray): The elevation map containing various layers. + layer_names (List[str]): A list of names for each layer in the elevation map. + plugin_layers (cp.ndarray): The plugin layers containing additional data. + plugin_layer_names (List[str]): A list of names for each layer in the plugin layers. + semantic_map (cp.ndarray): The semantic map containing various layers. + semantic_layer_names (List[str]): A list of names for each layer in the semantic map. + name (str): The name of the layer to retrieve. + + Returns: + Optional[cp.ndarray]: A copy of the requested layer as a cupy ndarray if found, otherwise None. + """ + if name in layer_names: + idx = layer_names.index(name) + layer = elevation_map[idx].copy() + elif name in plugin_layer_names: + idx = plugin_layer_names.index(name) + layer = plugin_layers[idx].copy() + elif name in semantic_layer_names: + idx = semantic_layer_names.index(name) + layer = semantic_map[idx].copy() + else: + print(f"Could not find layer {name}!") + layer = None + return layer + + +class PluginManager(object): + """ + This manages the plugins. + """ + + def __init__(self, cell_n: int): + self.cell_n = cell_n + + def init(self, plugin_params: List[PluginParams], extra_params: List[Dict]): + self.plugin_params = plugin_params + + self.plugins = [] + for param, extra_param in zip(plugin_params, extra_params): + m = importlib.import_module("." 
+ param.name, package="elevation_mapping_cupy.plugins") # -> 'module' + for name, obj in inspect.getmembers(m): + if inspect.isclass(obj) and issubclass(obj, PluginBase) and name != "PluginBase": + # Add cell_n to params + extra_param["cell_n"] = self.cell_n + self.plugins.append(obj(**extra_param)) + self.layers = cp.zeros((len(self.plugins), self.cell_n, self.cell_n), dtype=cp.float32) + self.layer_names = self.get_layer_names() + self.plugin_names = self.get_plugin_names() + + def load_plugin_settings(self, file_path: str): + cfg = YAML().load(open(file_path, "r")) + plugin_params = [] + extra_params = [] + for k, v in cfg.items(): + if v["enable"]: + plugin_params.append( + PluginParams( + name=k if not "type" in v else v["type"], + layer_name=v["layer_name"], + fill_nan=v["fill_nan"], + is_height_layer=v["is_height_layer"], + ) + ) + extra_params.append(v["extra_params"]) + self.init(plugin_params, extra_params) + print("Loaded plugins are ", *self.plugin_names) + + def get_layer_names(self): + names = [] + for obj in self.plugin_params: + names.append(obj.layer_name) + return names + + def get_plugin_names(self): + names = [] + for obj in self.plugin_params: + names.append(obj.name) + return names + + def get_plugin_index_with_name(self, name: str) -> int: + try: + idx = self.plugin_names.index(name) + return idx + except Exception as e: + print("Error with plugin {}: {}".format(name, e)) + return None + + def get_layer_index_with_name(self, name: str) -> int: + try: + idx = self.layer_names.index(name) + return idx + except Exception as e: + print("Error with layer {}: {}".format(name, e)) + return None + + def update_with_name( + self, + name: str, + elevation_map: cp.ndarray, + layer_names: List[str], + semantic_map=None, + semantic_params=None, + rotation=None, + elements_to_shift={}, + ): + idx = self.get_layer_index_with_name(name) + if idx is not None and idx < len(self.plugins): + n_param = len(signature(self.plugins[idx]).parameters) + if n_param == 5: + self.layers[idx] = self.plugins[idx](elevation_map, layer_names, self.layers, self.layer_names) + elif n_param == 7: + self.layers[idx] = self.plugins[idx]( + elevation_map, + layer_names, + self.layers, + self.layer_names, + semantic_map, + semantic_params, + ) + elif n_param == 8: + self.layers[idx] = self.plugins[idx]( + elevation_map, + layer_names, + self.layers, + self.layer_names, + semantic_map, + semantic_params, + rotation, + ) + else: + self.layers[idx] = self.plugins[idx]( + elevation_map, + layer_names, + self.layers, + self.layer_names, + semantic_map, + semantic_params, + rotation, + elements_to_shift, + ) + + def get_map_with_name(self, name: str) -> cp.ndarray: + idx = self.get_layer_index_with_name(name) + if idx is not None: + return self.layers[idx] + + def get_param_with_name(self, name: str) -> PluginParams: + idx = self.get_layer_index_with_name(name) + if idx is not None: + return self.plugin_params[idx] + + +if __name__ == "__main__": + plugins = [ + PluginParams(name="min_filter", layer_name="min_filter"), + PluginParams(name="smooth_filter", layer_name="smooth"), + ] + extra_params = [ + {"dilation_size": 5, "iteration_n": 5}, + {"input_layer_name": "elevation2"}, + ] + manager = PluginManager(200) + manager.load_plugin_settings("../config/plugin_config.yaml") + print(manager.layer_names) + print(manager.plugin_names) + elevation_map = cp.zeros((7, 200, 200)).astype(cp.float32) + layer_names = [ + "elevation", + "variance", + "is_valid", + "traversability", + "time", + "upper_bound", + 
"is_upper_bound", + ] + elevation_map[0] = cp.random.randn(200, 200) + elevation_map[2] = cp.abs(cp.random.randn(200, 200)) + print("map", elevation_map[0]) + print("layer map ", manager.layers) + manager.update_with_name("min_filter", elevation_map, layer_names) + manager.update_with_name("smooth_filter", elevation_map, layer_names) + # manager.update_with_name("sem_fil", elevation_map, layer_names, semantic_map=semantic_map) + print(manager.get_map_with_name("smooth")) diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/plugins/robot_centric_elevation.py b/elevation_mapping_cupy/elevation_mapping_cupy/plugins/robot_centric_elevation.py new file mode 100644 index 00000000..80ac73a3 --- /dev/null +++ b/elevation_mapping_cupy/elevation_mapping_cupy/plugins/robot_centric_elevation.py @@ -0,0 +1,121 @@ +# +# Copyright (c) 2022, Takahiro Miki. All rights reserved. +# Licensed under the MIT license. See LICENSE file in the project root for details. +# +import cupy as cp +import string +from typing import List +from .plugin_manager import PluginBase + + +class RobotCentricElevation(PluginBase): + """Generates an elevation map with respect to the robot frame. + + Args: + cell_n (int): + resolution (ruamel.yaml.scalarfloat.ScalarFloat): + threshold (ruamel.yaml.scalarfloat.ScalarFloat): + use_threshold (bool): + **kwargs (): + """ + + def __init__( + self, cell_n: int = 100, resolution: float = 0.05, threshold: float = 0.4, use_threshold: bool = 0, **kwargs + ): + super().__init__() + self.width = cell_n + self.height = cell_n + self.min_filtered = cp.zeros((self.width, self.height), dtype=cp.float32) + + self.base_elevation_kernel = cp.ElementwiseKernel( + in_params="raw U map, raw U mask, raw U R", + out_params="raw U newmap", + preamble=string.Template( + """ + __device__ int get_map_idx(int idx, int layer_n) { + const int layer = ${width} * ${height}; + return layer * layer_n + idx; + } + + __device__ bool is_inside(int idx) { + int idx_x = idx / ${width}; + int idx_y = idx % ${width}; + if (idx_x <= 0 || idx_x >= ${width} - 1) { + return false; + } + if (idx_y <= 0 || idx_y >= ${height} - 1) { + return false; + } + return true; + } + __device__ float get_map_x(int idx){ + float idx_x = idx / ${width}* ${resolution}; + return idx_x; + } + __device__ float get_map_y(int idx){ + float idx_y = idx % ${width}* ${resolution}; + return idx_y; + } + __device__ float transform_p(float x, float y, float z, + float r0, float r1, float r2) { + return r0 * x + r1 * y + r2 * z ; + } + """ + ).substitute(width=self.width, height=self.height, resolution=resolution), + operation=string.Template( + """ + U rz = map[get_map_idx(i, 0)]; + U valid = mask[get_map_idx(i, 0)]; + if (valid > 0.5) { + U rx = get_map_x(get_map_idx(i, 0)); + U ry = get_map_y(get_map_idx(i, 0)); + U x_b = transform_p(rx, ry, rz, R[0], R[1], R[2]); + U y_b = transform_p(rx, ry, rz, R[3], R[4], R[5]); + U z_b = transform_p(rx, ry, rz, R[6], R[7], R[8]); + if (${use_threshold} && z_b>= ${threshold} ) { + newmap[get_map_idx(i, 0)] = 1.0; + } + else if (${use_threshold} && z_b< ${threshold} ){ + newmap[get_map_idx(i, 0)] = 0.0; + } + else{ + newmap[get_map_idx(i, 0)] = z_b; + } + } + """ + ).substitute(threshold=threshold, use_threshold=int(use_threshold)), + name="base_elevation_kernel", + ) + + def __call__( + self, + elevation_map: cp.ndarray, + layer_names: List[str], + plugin_layers: cp.ndarray, + plugin_layer_names: List[str], + semantic_map: cp.ndarray, + semantic_layer_names: List[str], + rotation, + *args, + ) -> cp.ndarray: + 
""" + + Args: + elevation_map (cupy._core.core.ndarray): + layer_names (List[str]): + plugin_layers (cupy._core.core.ndarray): + plugin_layer_names (List[str]): + semantic_map (elevation_mapping_cupy.semantic_map.SemanticMap): + rotation (cupy._core.core.ndarray): + *args (): + + Returns: + cupy._core.core.ndarray: + """ + # Process maps here + # check that transform is a ndarray + self.min_filtered = elevation_map[0].copy() + self.base_elevation_kernel( + elevation_map[0], elevation_map[2], rotation, self.min_filtered, size=(self.width * self.height), + ) + return self.min_filtered diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/plugins/semantic_filter.py b/elevation_mapping_cupy/elevation_mapping_cupy/plugins/semantic_filter.py new file mode 100644 index 00000000..0ec56010 --- /dev/null +++ b/elevation_mapping_cupy/elevation_mapping_cupy/plugins/semantic_filter.py @@ -0,0 +1,133 @@ +# +# Copyright (c) 2022, Takahiro Miki. All rights reserved. +# Licensed under the MIT license. See LICENSE file in the project root for details. +# +import cupy as cp +import numpy as np +from typing import List +import re + +from elevation_mapping_cupy.plugins.plugin_manager import PluginBase + + +class SemanticFilter(PluginBase): + """This is a filter to create a one hot encoded map of the class probabilities. + + Args: + cell_n (int): width and height of the elevation map. + classes (list): List of classes for semantic filtering. Default is ["person", "grass"]. + **kwargs: Additional keyword arguments. + """ + + def __init__( + self, cell_n: int = 100, classes: list = ["person", "grass"], **kwargs, + ): + super().__init__() + self.indices = [] + self.classes = classes + self.color_encoding = self.transform_color() + + def color_map(self, N: int = 256, normalized: bool = False): + """ + Creates a color map with N different colors. + + Args: + N (int, optional): The number of colors in the map. Defaults to 256. + normalized (bool, optional): If True, the colors are normalized to the range [0,1]. Defaults to False. + + Returns: + np.ndarray: The color map. + """ + + def bitget(byteval, idx): + return (byteval & (1 << idx)) != 0 + + dtype = "float32" if normalized else "uint8" + cmap = np.zeros((N + 1, 3), dtype=dtype) + for i in range(N + 1): + r = g = b = 0 + c = i + for j in range(8): + r = r | (bitget(c, 0) << 7 - j) + g = g | (bitget(c, 1) << 7 - j) + b = b | (bitget(c, 2) << 7 - j) + c = c >> 3 + + cmap[i] = np.array([r, g, b]) + + cmap = cmap / 255 if normalized else cmap + cmap[1] = np.array([81, 113, 162]) + cmap[2] = np.array([81, 113, 162]) + cmap[3] = np.array([188, 63, 59]) + return cmap[1:] + + def transform_color(self): + """ + Transforms the color map into a format that can be used for semantic filtering. + + Returns: + cp.ndarray: The transformed color map. + """ + color_classes = self.color_map(255) + r = np.asarray(color_classes[:, 0], dtype=np.uint32) + g = np.asarray(color_classes[:, 1], dtype=np.uint32) + b = np.asarray(color_classes[:, 2], dtype=np.uint32) + rgb_arr = np.array((r << 16) | (g << 8) | (b << 0), dtype=np.uint32) + rgb_arr.dtype = np.float32 + return cp.asarray(rgb_arr) + + def get_layer_indices(self, layer_names: List[str]) -> List[int]: + """ Get the indices of the layers that are to be processed using regular expressions. + Args: + layer_names (List[str]): List of layer names. + Returns: + List[int]: List of layer indices. 
+ """ + indices = [] + for i, layer_name in enumerate(layer_names): + if any(re.match(pattern, layer_name) for pattern in self.classes): + indices.append(i) + return indices + + def __call__( + self, + elevation_map: cp.ndarray, + layer_names: List[str], + plugin_layers: cp.ndarray, + plugin_layer_names: List[str], + semantic_map: cp.ndarray, + semantic_layer_names: List[str], + rotation, + elements_to_shift, + *args, + ) -> cp.ndarray: + """ + + Args: + elevation_map (cupy._core.core.ndarray): + layer_names (List[str]): + plugin_layers (cupy._core.core.ndarray): + plugin_layer_names (List[str]): + semantic_map (elevation_mapping_cupy.semantic_map.SemanticMap): + *args (): + + Returns: + cupy._core.core.ndarray: + """ + # get indices of all layers that contain semantic class information + data = [] + for m, layer_names in zip( + [elevation_map, plugin_layers, semantic_map], [layer_names, plugin_layer_names, semantic_layer_names] + ): + layer_indices = self.get_layer_indices(layer_names) + if len(layer_indices) > 0: + data.append(m[layer_indices]) + if len(data) > 0: + data = cp.concatenate(data, axis=0) + class_map = cp.amax(data, axis=0) + class_map_id = cp.argmax(data, axis=0) + else: + class_map = cp.zeros_like(elevation_map[0]) + class_map_id = cp.zeros_like(elevation_map[0], dtype=cp.int32) + enc = self.color_encoding[class_map_id] + return enc diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/plugins/semantic_traversability.py b/elevation_mapping_cupy/elevation_mapping_cupy/plugins/semantic_traversability.py new file mode 100644 index 00000000..5f6ef1d5 --- /dev/null +++ b/elevation_mapping_cupy/elevation_mapping_cupy/plugins/semantic_traversability.py @@ -0,0 +1,83 @@ +# +# Copyright (c) 2022, Takahiro Miki. All rights reserved. +# Licensed under the MIT license. See LICENSE file in the project root for details. +# +import cupy as cp +import numpy as np +from typing import List + +from elevation_mapping_cupy.plugins.plugin_manager import PluginBase + + +class SemanticTraversability(PluginBase): + """Extracts traversability and elevations from layers and generates an updated traversability that can be used by checker. + + Args: + cell_n (int): The width and height of the elevation map. + layers (list): List of layers for semantic traversability. Default is ["traversability"]. + thresholds (list): List of thresholds for each layer. Default is [0.5]. + type (list): List of types for each layer. Default is ["traversability"]. + **kwargs: Additional keyword arguments. 
+ """ + + def __init__( + self, + cell_n: int = 100, + layers: list = ["traversability"], + thresholds: list = [0.5], + type: list = ["traversability"], + **kwargs, + ): + super().__init__() + self.layers = layers + self.thresholds = cp.asarray(thresholds) + self.type = type + + def __call__( + self, + elevation_map: cp.ndarray, + layer_names: List[str], + plugin_layers: cp.ndarray, + plugin_layer_names: List[str], + semantic_map: cp.ndarray, + semantic_layer_names: List[str], + *args, + ) -> cp.ndarray: + """ + + Args: + elevation_map (cupy._core.core.ndarray): + layer_names (List[str]): + plugin_layers (cupy._core.core.ndarray): + plugin_layer_names (List[str]): + semantic_map (elevation_mapping_cupy.semantic_map.SemanticMap): + *args (): + + Returns: + cupy._core.core.ndarray: + """ + # get indices of all layers that + map = cp.zeros(elevation_map[2].shape, np.float32) + tempo = cp.zeros(elevation_map[2].shape, np.float32) + for it, name in enumerate(self.layers): + if name in layer_names: + idx = layer_names.index(name) + tempo = elevation_map[idx] + # elif name in semantic_params.additional_layers: + # idx = semantic_params.additional_layers.index(name) + # tempo = semantic_map[idx] + elif name in plugin_layer_names: + idx = plugin_layer_names.index(name) + tempo = plugin_layers[idx] + else: + print("Layer {} is not in the map, returning traversabiltiy!".format(name)) + return + if self.type[it] == "traversability": + tempo = cp.where(tempo <= self.thresholds[it], 1, 0) + map += tempo + else: + tempo = cp.where(tempo >= self.thresholds[it], 1, 0) + map += tempo + map = cp.where(map <= 0.9, 0.1, 1) + + return map diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/plugins/smooth_filter.py b/elevation_mapping_cupy/elevation_mapping_cupy/plugins/smooth_filter.py new file mode 100644 index 00000000..ae48b688 --- /dev/null +++ b/elevation_mapping_cupy/elevation_mapping_cupy/plugins/smooth_filter.py @@ -0,0 +1,59 @@ +# +# Copyright (c) 2022, Takahiro Miki. All rights reserved. +# Licensed under the MIT license. See LICENSE file in the project root for details. +# +import cupy as cp +from typing import List +import cupyx.scipy.ndimage as ndimage + +from .plugin_manager import PluginBase + + +class SmoothFilter(PluginBase): + """ + SmoothFilter is a class that applies a smoothing filter + to the elevation map. The filter is applied to the layer specified by the input_layer_name parameter. + If the specified layer is not found, the filter is applied to the elevation layer. + + Args: + cell_n (int): The width and height of the elevation map. Default is 100. + input_layer_name (str): The name of the layer to which the filter should be applied. Default is "elevation". + **kwargs: Additional keyword arguments. 
+ """ + + def __init__(self, cell_n: int = 100, input_layer_name: str = "elevation", **kwargs): + super().__init__() + self.input_layer_name = input_layer_name + + def __call__( + self, + elevation_map: cp.ndarray, + layer_names: List[str], + plugin_layers: cp.ndarray, + plugin_layer_names: List[str], + *args, + ) -> cp.ndarray: + """ + + Args: + elevation_map (cupy._core.core.ndarray): + layer_names (List[str]): + plugin_layers (cupy._core.core.ndarray): + plugin_layer_names (List[str]): + *args (): + + Returns: + cupy._core.core.ndarray: + """ + if self.input_layer_name in layer_names: + idx = layer_names.index(self.input_layer_name) + h = elevation_map[idx] + elif self.input_layer_name in plugin_layer_names: + idx = plugin_layer_names.index(self.input_layer_name) + h = plugin_layers[idx] + else: + print("layer name {} was not found. Using elevation layer.".format(self.input_layer_name)) + h = elevation_map[0] + hs1 = ndimage.uniform_filter(h, size=3) + hs1 = ndimage.uniform_filter(hs1, size=3) + return hs1 diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/semantic_map.py b/elevation_mapping_cupy/elevation_mapping_cupy/semantic_map.py new file mode 100644 index 00000000..390597b6 --- /dev/null +++ b/elevation_mapping_cupy/elevation_mapping_cupy/semantic_map.py @@ -0,0 +1,400 @@ +# +# Copyright (c) 2022, Takahiro Miki. All rights reserved. +# Licensed under the MIT license. See LICENSE file in the project root for details. +# +from elevation_mapping_cupy.parameter import Parameter +import cupy as cp +import numpy as np +from typing import List, Dict +import re + + +from elevation_mapping_cupy.fusion.fusion_manager import FusionManager + +xp = cp + + +class SemanticMap: + def __init__(self, param: Parameter): + """ + + Args: + param (elevation_mapping_cupy.parameter.Parameter): + """ + + self.param = param + + self.layer_specs_points = {} + self.layer_specs_image = {} + self.layer_names = [] + self.unique_fusion = [] + self.unique_data = [] + self.elements_to_shift = {} + + self.unique_fusion = self.param.fusion_algorithms + + self.amount_layer_names = len(self.layer_names) + + self.semantic_map = xp.zeros( + (self.amount_layer_names, self.param.cell_n, self.param.cell_n), dtype=param.data_type, + ) + self.new_map = xp.zeros((self.amount_layer_names, self.param.cell_n, self.param.cell_n), param.data_type,) + # which layers should be reset to zero at each update, per default everyone, + # if a layer should not be reset, it is defined in compile_kernels function + self.delete_new_layers = cp.ones(self.new_map.shape[0], cp.bool_) + self.fusion_manager = FusionManager(self.param) + + def clear(self): + """Clear the semantic map.""" + self.semantic_map *= 0.0 + + def initialize_fusion(self): + """Initialize the fusion algorithms.""" + for fusion in self.unique_fusion: + if "pointcloud_class_bayesian" == fusion: + pcl_ids = self.get_layer_indices("class_bayesian", self.layer_specs_points) + self.delete_new_layers[pcl_ids] = 0 + if "pointcloud_class_max" == fusion: + pcl_ids = self.get_layer_indices("class_max", self.layer_specs_points) + self.delete_new_layers[pcl_ids] = 0 + layer_cnt = self.param.fusion_algorithms.count("class_max") + id_max = cp.zeros((layer_cnt, self.param.cell_n, self.param.cell_n), dtype=cp.uint32,) + self.elements_to_shift["id_max"] = id_max + self.fusion_manager.register_plugin(fusion) + + def update_fusion_setting(self): + """ + Update the fusion settings. 
+ """ + for fusion in self.unique_fusion: + if "pointcloud_class_bayesian" == fusion: + pcl_ids = self.get_layer_indices("class_bayesian", self.layer_specs_points) + self.delete_new_layers[pcl_ids] = 0 + if "pointcloud_class_max" == fusion: + pcl_ids = self.get_layer_indices("class_max", self.layer_specs_points) + self.delete_new_layers[pcl_ids] = 0 + layer_cnt = self.param.fusion_algorithms.count("class_max") + id_max = cp.zeros((layer_cnt, self.param.cell_n, self.param.cell_n), dtype=cp.uint32,) + self.elements_to_shift["id_max"] = id_max + + def add_layer(self, name): + """ + Add a new layer to the semantic map. + + Args: + name (str): The name of the new layer. + """ + if name not in self.layer_names: + self.layer_names.append(name) + self.semantic_map = cp.append( + self.semantic_map, + cp.zeros((1, self.param.cell_n, self.param.cell_n), dtype=self.param.data_type), + axis=0, + ) + self.new_map = cp.append( + self.new_map, cp.zeros((1, self.param.cell_n, self.param.cell_n), dtype=self.param.data_type), axis=0, + ) + self.delete_new_layers = cp.append(self.delete_new_layers, cp.array([1], dtype=cp.bool_)) + + def pad_value(self, x, shift_value, idx=None, value=0.0): + """Create a padding of the map along x,y-axis according to amount that has shifted. + + Args: + x (cupy._core.core.ndarray): + shift_value (cupy._core.core.ndarray): + idx (Union[None, int, None, None]): + value (float): + """ + if idx is None: + if shift_value[0] > 0: + x[:, : shift_value[0], :] = value + elif shift_value[0] < 0: + x[:, shift_value[0] :, :] = value + if shift_value[1] > 0: + x[:, :, : shift_value[1]] = value + elif shift_value[1] < 0: + x[:, :, shift_value[1] :] = value + else: + if shift_value[0] > 0: + x[idx, : shift_value[0], :] = value + elif shift_value[0] < 0: + x[idx, shift_value[0] :, :] = value + if shift_value[1] > 0: + x[idx, :, : shift_value[1]] = value + elif shift_value[1] < 0: + x[idx, :, shift_value[1] :] = value + + def shift_map_xy(self, shift_value): + """Shift the map along x,y-axis according to shift values. + + Args: + shift_value: + """ + self.semantic_map = cp.roll(self.semantic_map, shift_value, axis=(1, 2)) + self.pad_value(self.semantic_map, shift_value, value=0.0) + self.new_map = cp.roll(self.new_map, shift_value, axis=(1, 2)) + self.pad_value(self.new_map, shift_value, value=0.0) + for el in self.elements_to_shift.values(): + el = cp.roll(el, shift_value, axis=(1, 2)) + self.pad_value(el, shift_value, value=0.0) + + def get_fusion( + self, channels: List[str], channel_fusions: Dict[str, str], layer_specs: Dict[str, str] + ) -> List[str]: + """Get all fusion algorithms that need to be applied to a specific pointcloud. + + Args: + channels (List[str]): + """ + fusion_list = [] + process_channels = [] + for channel in channels: + if channel not in layer_specs: + # If the channel is not in the layer_specs, we use the default fusion algorithm + matched_fusion = self.get_matching_fusion(channel, channel_fusions) + if matched_fusion is None: + if "default" in channel_fusions: + default_fusion = channel_fusions["default"] + print( + f"[WARNING] Layer {channel} not found in layer_specs. Using {default_fusion} algorithm as default." + ) + layer_specs[channel] = default_fusion + self.update_fusion_setting() + # If there's no default fusion algorithm, we skip this channel + else: + print( + f"[WARNING] Layer {channel} not found in layer_specs ({layer_specs}) and no default fusion is configured. Skipping." 
+                        )
+                        continue
+                else:
+                    layer_specs[channel] = matched_fusion
+                    self.update_fusion_setting()
+            x = layer_specs[channel]
+            fusion_list.append(x)
+            process_channels.append(channel)
+        return process_channels, fusion_list
+
+    def get_matching_fusion(self, channel: str, fusion_algs: Dict[str, str]):
+        """Use a regular expression to check whether a fusion-algorithm pattern matches the channel name."""
+        for fusion_alg, alg_value in fusion_algs.items():
+            if re.match(f"^{fusion_alg}$", channel):
+                return alg_value
+        return None
+
+    def get_layer_indices(self, fusion_alg, layer_specs):
+        """Get the indices of the layers that are used for a specific fusion algorithm.
+
+        Args:
+            fusion_alg (str): fusion algorithm name
+            layer_specs (Dict[str, str]): mapping from channel names to fusion algorithms.
+
+        Returns:
+            cp.array: indices of the layers
+        """
+        layer_indices = cp.array([], dtype=cp.int32)
+        for it, val in enumerate(layer_specs.values()):
+            if val == fusion_alg:
+                layer_indices = cp.append(layer_indices, it).astype(cp.int32)
+        return layer_indices
+
+    def get_indices_fusion(self, pcl_channels: List[str], fusion_alg: str, layer_specs: Dict[str, str]):
+        """Compute the indices of the pointcloud channels and of the semantic map layers that use fusion_alg.
+
+        Args:
+            pcl_channels (List[str]): list of all channel names
+            fusion_alg (str): fusion algorithm type we want to use for channel selection
+
+        Returns:
+            Tuple[cupy._core.core.ndarray, cupy._core.core.ndarray]: pointcloud indices and layer indices.
+        """
+        # this contains exactly the fusion alg type for each channel of the pcl
+        pcl_val_list = [layer_specs[x] for x in pcl_channels]
+        # indices of the point cloud where we have to perform a certain fusion (+ 3 skips the x, y, z channels)
+        pcl_indices = cp.array([idp + 3 for idp, x in enumerate(pcl_val_list) if x == fusion_alg], dtype=cp.int32,)
+        # create a list of indices of the layers that will be updated by the point cloud with specific fusion alg
+        layer_indices = cp.array([], dtype=cp.int32)
+        for it, (key, val) in enumerate(layer_specs.items()):
+            if key in pcl_channels and val == fusion_alg:
+                layer_idx = self.layer_names.index(key)
+                layer_indices = cp.append(layer_indices, layer_idx).astype(cp.int32)
+        return pcl_indices, layer_indices
+
+    def update_layers_pointcloud(self, points_all, channels, R, t, elevation_map):
+        """Update the semantic map with the pointcloud.
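+        Channels that are not yet present in the map are added as new layers;
+        channels with no matching fusion algorithm and no configured default
+        are skipped (see get_fusion).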
+ + Args: + points_all: semantic point cloud + channels: list of channel names + R: rotation matrix + t: translation vector + elevation_map: elevation map object + """ + process_channels, additional_fusion = self.get_fusion( + channels, self.param.pointcloud_channel_fusions, self.layer_specs_points + ) + # If channels has a new layer that is not in the semantic map, add it + for channel in process_channels: + if channel not in self.layer_names: + print(f"Layer {channel} not found, adding it to the semantic map") + self.add_layer(channel) + + # Resetting new_map for the layers that are to be deleted + self.new_map[self.delete_new_layers] = 0.0 + for fusion in list(set(additional_fusion)): + # which layers need to be updated with this fusion algorithm + pcl_ids, layer_ids = self.get_indices_fusion(process_channels, fusion, self.layer_specs_points) + # update the layers with the fusion algorithm + self.fusion_manager.execute_plugin( + fusion, + points_all, + R, + t, + pcl_ids, + layer_ids, + elevation_map, + self.semantic_map, + self.new_map, + self.elements_to_shift, + ) + + def update_layers_image( + self, + # sub_key: str, + image: cp._core.core.ndarray, + channels: List[str], + # fusion_methods: List[str], + uv_correspondence: cp._core.core.ndarray, + valid_correspondence: cp._core.core.ndarray, + image_height: cp._core.core.ndarray, + image_width: cp._core.core.ndarray, + ): + """Update the semantic map with the new image. + + Args: + sub_key: + image: + uv_correspondence: + valid_correspondence: + image_height: + image_width: + """ + + process_channels, fusion_methods = self.get_fusion( + channels, self.param.image_channel_fusions, self.layer_specs_image + ) + self.new_map[self.delete_new_layers] = 0.0 + for j, (fusion, channel) in enumerate(zip(fusion_methods, process_channels)): + if channel not in self.layer_names: + print(f"Layer {channel} not found, adding it to the semantic map") + self.add_layer(channel) + sem_map_idx = self.get_index(channel) + + if sem_map_idx == -1: + print(f"Layer {channel} not found!") + return + + # update the layers with the fusion algorithm + self.fusion_manager.execute_image_plugin( + fusion, + cp.uint64(sem_map_idx), + image, + j, + uv_correspondence, + valid_correspondence, + image_height, + image_width, + self.semantic_map, + self.new_map, + ) + + def decode_max(self, mer): + """Decode the float32 value into two 16 bit value containing the class probability and the class id. + + Args: + mer: + + Returns: + cp.array: probability + cp.array: class id + """ + mer = mer.astype(cp.float32) + mer = mer.view(dtype=cp.uint32) + ma = cp.bitwise_and(mer, 0xFFFF, dtype=np.uint16) + ma = ma.view(np.float16) + ma = ma.astype(np.float32) + ind = cp.right_shift(mer, 16) + return ma, ind + + def get_map_with_name(self, name): + """Return the map with the given name. + + Args: + name: layer name + + Returns: + cp.array: map + """ + # If the layer is a color layer, return the rgb map + if name in self.layer_specs_points and self.layer_specs_points[name] == "color": + m = self.get_rgb(name) + return m + elif name in self.layer_specs_image and self.layer_specs_image[name] == "color": + m = self.get_rgb(name) + return m + else: + m = self.get_semantic(name) + return m + + def get_rgb(self, name): + """Return the rgb map with the given name. 
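+        The float32 values are 8-bit RGB triplets packed into the low 24 bits
+        (the same packing as SemanticFilter.transform_color), so the layer can
+        be consumed directly as a color layer.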
+
+        Args:
+            name (str): layer name
+
+        Returns:
+            cp.array: rgb map
+        """
+        idx = self.layer_names.index(name)
+        c = self.process_map_for_publish(self.semantic_map[idx])
+        c = c.astype(np.float32)
+        return c
+
+    def get_semantic(self, name):
+        """Return the semantic map layer with the given name.
+
+        Args:
+            name (str): layer name
+
+        Returns:
+            cp.array: semantic map layer
+        """
+        idx = self.layer_names.index(name)
+        c = self.process_map_for_publish(self.semantic_map[idx])
+        return c
+
+    def process_map_for_publish(self, input_map):
+        """Remove padding.
+
+        Args:
+            input_map (cp.array): map layer
+
+        Returns:
+            cp.array: map layer without padding
+        """
+        m = input_map.copy()
+        return m[1:-1, 1:-1]
+
+    def get_index(self, name):
+        """Return the index of the layer with the given name.
+
+        Args:
+            name (str): layer name
+
+        Returns:
+            int: index of the layer, or -1 if it does not exist
+        """
+        if name not in self.layer_names:
+            return -1
+        else:
+            return self.layer_names.index(name)
diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/tests/__init__.py b/elevation_mapping_cupy/elevation_mapping_cupy/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/tests/plugin_config.yaml b/elevation_mapping_cupy/elevation_mapping_cupy/tests/plugin_config.yaml
new file mode 100644
index 00000000..0e8f67c9
--- /dev/null
+++ b/elevation_mapping_cupy/elevation_mapping_cupy/tests/plugin_config.yaml
@@ -0,0 +1,66 @@
+# Settings of the plugins. (The plugins should be stored in script/plugins)
+
+# min_filter fills invalid cells with the minimum value of their neighborhood.
+min_filter:
+  enable: True # whether to load this plugin
+  fill_nan: False # Fill nans to invalid cells of elevation layer.
+  is_height_layer: True # If this is a height layer (such as elevation) or not (such as traversability)
+  layer_name: "min_filter" # The layer name.
+  extra_params: # These params are passed to the plugin class on initialization.
+    dilation_size: 1 # The patch size to apply
+    iteration_n: 30 # The number of iterations
+# Apply smoothing.
+smooth_filter:
+  enable: True
+  fill_nan: False
+  is_height_layer: True
+  layer_name: "smooth"
+  extra_params:
+    input_layer_name: "min_filter"
+# Apply inpainting using opencv
+inpainting:
+  enable: True
+  fill_nan: False
+  is_height_layer: True
+  layer_name: "inpaint"
+  extra_params:
+    method: "telea" # telea or ns
+# Apply smoothing for inpainted layer
+smooth_filter_1:
+  type: "smooth_filter"
+  enable: True
+  fill_nan: False
+  is_height_layer: True
+  layer_name: "smooth_1"
+  extra_params:
+    input_layer_name: "inpaint"
+robot_centric_elevation: # Use the same name as your file name.
+  # type: "robot_centric_elevation"
+  enable: True # whether to load this plugin
+  fill_nan: False # Fill nans to invalid cells of elevation layer.
+  is_height_layer: True # If this is a height layer (such as elevation) or not (such as traversability)
+  layer_name: "robot_centric_elevation" # The layer name.
+  extra_params: # These params are passed to the plugin class on initialization.
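+    # These kwargs map onto RobotCentricElevation.__init__ (resolution, threshold, use_threshold).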
+ # add_value: 2.0 # Example param + resolution: 0.04 + threshold: 1.1 + use_threshold: True +semantic_filter: + type: "semantic_filter" + enable: True + fill_nan: False + is_height_layer: False + layer_name: "sem_fil" + extra_params: + classes: [ 'grass','tree','fence','person' ] + colors: [ [ 0,255,0 ],[ 120,120,0 ],[ 170,0,20 ],[ 0,0,255 ],[ 255,0,0 ] ] +semantic_traversability: + type: "semantic_traversability" + enable: True + fill_nan: False + is_height_layer: False + layer_name: "sem_traversability" + extra_params: + layers: [ 'traversability','robot_centric_elevation' ] + thresholds: [ 0.7,0.5 ] + type: [ 'traversability', 'elevation' ] diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/tests/test_elevation_mapping.py b/elevation_mapping_cupy/elevation_mapping_cupy/tests/test_elevation_mapping.py new file mode 100644 index 00000000..7bdbb20b --- /dev/null +++ b/elevation_mapping_cupy/elevation_mapping_cupy/tests/test_elevation_mapping.py @@ -0,0 +1,118 @@ +import pytest +from elevation_mapping_cupy import parameter, elevation_mapping +import cupy as cp +import numpy as np + + +def encode_max(maxim, index): + maxim, index = cp.asarray(maxim, dtype=cp.float32), cp.asarray(index, dtype=cp.uint32) + # fuse them + maxim = maxim.astype(cp.float16) + maxim = maxim.view(cp.uint16) + maxim = maxim.astype(cp.uint32) + index = index.astype(cp.uint32) + mer = cp.array(cp.left_shift(index, 16) | maxim, dtype=cp.uint32) + mer = mer.view(cp.float32) + return mer + + +@pytest.fixture() +def elmap_ex(add_lay, fusion_alg): + additional_layer = add_lay + fusion_algorithms = fusion_alg + p = parameter.Parameter( + use_chainer=False, + weight_file="../../../config/weights.dat", + plugin_config_file="../../../config/plugin_config.yaml", + ) + p.subscriber_cfg["front_cam"]["channels"] = additional_layer + p.subscriber_cfg["front_cam"]["fusion"] = fusion_algorithms + p.update() + e = elevation_mapping.ElevationMap(p) + return e + + +@pytest.mark.parametrize( + "add_lay,fusion_alg", + [ + (["feat_0", "feat_1", "rgb"], ["average", "average", "color"]), + (["feat_0", "feat_1"], ["average", "average"]), + (["feat_0", "feat_1"], ["class_average", "class_average"]), + (["feat_0", "feat_1"], ["class_bayesian", "class_bayesian"]), + (["feat_0", "feat_1"], ["class_bayesian", "class_max"]), + (["feat_0", "feat_1"], ["bayesian_inference", "bayesian_inference"]), + ], +) +class TestElevationMap: + def test_init(self, elmap_ex): + assert len(elmap_ex.layer_names) == elmap_ex.elevation_map.shape[0] + # assert elmap_ex.color_map is None + + def test_input(self, elmap_ex): + channels = ["x", "y", "z"] + elmap_ex.param.additional_layers + if "class_max" in elmap_ex.param.fusion_algorithms: + val = cp.random.rand(100000, len(channels), dtype=cp.float32).astype(cp.float16) + ind = cp.random.randint(0, 2, (100000, len(channels)), dtype=cp.uint32).astype(cp.float32) + points = encode_max(val, ind) + else: + points = cp.random.rand(100000, len(channels), dtype=elmap_ex.param.data_type) + R = cp.random.rand(3, 3, dtype=elmap_ex.param.data_type) + t = cp.random.rand(3, dtype=elmap_ex.param.data_type) + elmap_ex.input_pointcloud(points, channels, R, t, 0, 0) + + def test_update_normal(self, elmap_ex): + elmap_ex.update_normal(elmap_ex.elevation_map[0]) + + def test_move_to(self, elmap_ex): + for i in range(20): + pos = np.array([i * 0.01, i * 0.02, i * 0.01]) + R = cp.random.rand(3, 3) + elmap_ex.move_to(pos, R) + + def test_get_map(self, elmap_ex): + layers = [ + "elevation", + "variance", + "traversability", + 
"min_filter", + "smooth", + "inpaint", + "rgb", + ] + data = np.zeros((elmap_ex.cell_n - 2, elmap_ex.cell_n - 2), dtype=cp.float32) + for layer in layers: + elmap_ex.get_map_with_name_ref(layer, data) + + def test_get_position(self, elmap_ex): + pos = np.random.rand(1, 3) + elmap_ex.get_position(pos) + + def test_clear(self, elmap_ex): + elmap_ex.clear() + + def test_move(self, elmap_ex): + delta_position = np.random.rand(3) + elmap_ex.move(delta_position) + + def test_exists_layer(self, elmap_ex, add_lay): + for layer in add_lay: + assert elmap_ex.exists_layer(layer) + + def test_polygon_traversability(self, elmap_ex): + polygon = cp.array([[0, 0], [2, 0], [0, 2]], dtype=np.float64) + result = np.array([0, 0, 0]) + number_polygons = elmap_ex.get_polygon_traversability(polygon, result) + untraversable_polygon = np.zeros((number_polygons, 2)) + elmap_ex.get_untraversable_polygon(untraversable_polygon) + + def test_initialize_map(self, elmap_ex): + methods = ["linear", "cubic", "nearest"] + for method in methods: + points = np.array([[-4.0, 0.0, 0.0], [-4.0, 8.0, 1.0], [4.0, 8.0, 0.0], [4.0, 0.0, 0.0]]) + elmap_ex.initialize_map(points, method) + + def test_plugins(self, elmap_ex): + layers = elmap_ex.plugin_manager.layer_names + data = np.zeros((200, 200), dtype=np.float32) + for layer in layers: + elmap_ex.get_map_with_name_ref(layer, data) diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/tests/test_parameter.py b/elevation_mapping_cupy/elevation_mapping_cupy/tests/test_parameter.py new file mode 100644 index 00000000..4c497349 --- /dev/null +++ b/elevation_mapping_cupy/elevation_mapping_cupy/tests/test_parameter.py @@ -0,0 +1,17 @@ +import pytest +from elevation_mapping_cupy.parameter import Parameter + + +def test_parameter(): + param = Parameter( + use_chainer=False, + weight_file="../../../config/weights.dat", + plugin_config_file="../../../config/plugin_config.yaml", + ) + res = param.resolution + param.set_value("resolution", 0.1) + param.get_types() + param.get_names() + param.update() + assert param.resolution == param.get_value("resolution") + param.load_weights(param.weight_file) diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/tests/test_plugins.py b/elevation_mapping_cupy/elevation_mapping_cupy/tests/test_plugins.py new file mode 100644 index 00000000..f3e8cc1c --- /dev/null +++ b/elevation_mapping_cupy/elevation_mapping_cupy/tests/test_plugins.py @@ -0,0 +1,69 @@ +import pytest +from elevation_mapping_cupy import semantic_map, parameter +import cupy as cp +import numpy as np +from elevation_mapping_cupy.plugins.plugin_manager import PluginManager, PluginParams + +plugin_path = "plugin_config.yaml" + + +@pytest.fixture() +def semmap_ex(add_lay, fusion_alg): + p = parameter.Parameter( + use_chainer=False, weight_file="../../../config/weights.dat", plugin_config_file=plugin_path, + ) + p.subscriber_cfg["front_cam"]["channels"] = add_lay + p.subscriber_cfg["front_cam"]["fusion"] = fusion_alg + p.update() + e = semantic_map.SemanticMap(p) + e.initialize_fusion() + return e + + +@pytest.mark.parametrize( + "add_lay, fusion_alg,channels", + [ + ( + ["grass", "tree", "fence", "person"], + ["class_average", "class_average", "class_average", "class_average"], + ["grass"], + ), + (["grass", "tree"], ["class_average", "class_average"], ["grass"]), + (["grass", "tree"], ["class_average", "class_max"], ["tree"]), + (["max1", "max2"], ["class_max", "class_max"], ["max1", "max2"]), + ], +) +def test_plugin_manager(semmap_ex, channels): + manager = PluginManager(202) + 
manager.load_plugin_settings(plugin_path) + elevation_map = cp.zeros((7, 202, 202)).astype(cp.float32) + rotation = cp.eye(3, dtype=cp.float32) + layer_names = [ + "elevation", + "variance", + "is_valid", + "traversability", + "time", + "upper_bound", + "is_upper_bound", + ] + elevation_map[0] = cp.random.randn(202, 202) + elevation_map[2] = cp.abs(cp.random.randn(202, 202)) + elevation_map[0] + manager.layers[0] + manager.update_with_name("min_filter", elevation_map, layer_names) + manager.update_with_name("smooth_filter", elevation_map, layer_names) + manager.update_with_name("semantic_filter", elevation_map, layer_names, semmap_ex, rotation) + manager.update_with_name("semantic_traversability", elevation_map, layer_names, semmap_ex) + manager.get_map_with_name("smooth") + for lay in manager.get_layer_names(): + manager.update_with_name( + lay, + elevation_map, + layer_names, + semmap_ex.semantic_map, + semmap_ex.param, + rotation, + semmap_ex.elements_to_shift, + ) + manager.get_map_with_name(lay) diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/tests/test_semantic_kernels.py b/elevation_mapping_cupy/elevation_mapping_cupy/tests/test_semantic_kernels.py new file mode 100644 index 00000000..590946eb --- /dev/null +++ b/elevation_mapping_cupy/elevation_mapping_cupy/tests/test_semantic_kernels.py @@ -0,0 +1,307 @@ +import pytest +from elevation_mapping_cupy import parameter, elevation_mapping +import cupy as cp + +from elevation_mapping_cupy.parameter import Parameter + +from elevation_mapping_cupy.kernels import add_points_kernel +from elevation_mapping_cupy.kernels import ( + average_kernel, + class_average_kernel, + alpha_kernel, + bayesian_inference_kernel, + add_color_kernel, + color_average_kernel, + sum_compact_kernel, + sum_max_kernel, + sum_kernel, +) + + +# to check output run: pytest -rP test_semantic_kernels.py + + +# only kernel where we test only one layer +def test_color_kernel(): + # params + cell_n = 4 + pcl_ids, layer_ids = cp.array([3], dtype=cp.int32), cp.array([0], dtype=cp.int32) + R = cp.eye(3, dtype=cp.float32) + t = cp.array([0, 0, 0], dtype=cp.float32) + points_all = cp.array([[0.1, 0.1, 0.1, 0.3], [0.1, 0.1, 0.1, 0.1], [0.1, 0.1, 0.1, 0.1]], dtype=cp.float32) + semantic_map = cp.zeros((1, 4, 4), dtype=cp.float32) + + # compile kernel + add_color_kernel_ = add_color_kernel(cell_n, cell_n,) + color_average_kernel_ = color_average_kernel(cell_n, cell_n) + + # updatelayer + color_map = cp.zeros((1 + 3 * layer_ids.shape[0], cell_n, cell_n), dtype=cp.uint32,) + + points_all = points_all.astype(cp.float32) + add_color_kernel_( + points_all, + R, + t, + pcl_ids, + layer_ids, + cp.array([points_all.shape[1], pcl_ids.shape[0]], dtype=cp.int32), + color_map, + size=(points_all.shape[0] * pcl_ids.shape[0]), + ) + color_average_kernel_( + color_map, + pcl_ids, + layer_ids, + cp.array([points_all.shape[1], pcl_ids.shape[0]], dtype=cp.int32), + semantic_map, + size=(cell_n * cell_n * pcl_ids.shape[0]), + ) + print(color_map) + + +@pytest.mark.parametrize( + "map_shape, points_all,pcl_ids, layer_ids", + [ + ( + (4, 4, 4), + cp.array( + [[0.1, 0.1, 0.1, 0.3, 0.3], [0.1, 0.1, 0.1, 0.1, 0.2], [0.1, 0.1, 0.1, 0.1, 0.2]], dtype=cp.float32 + ), + cp.array([3, 4], dtype=cp.int32), + cp.array([1, 2], dtype=cp.int32), + ), + ], +) +def test_sum_kernel(map_shape, points_all, pcl_ids, layer_ids): + # create points + resolution = 0.9 + points = points_all[:, :3] + # arguments for kernel + semantic_map = cp.zeros(map_shape, dtype=cp.float32) + new_map = cp.zeros(map_shape, 
dtype=cp.float32) + R = cp.eye(3, dtype=cp.float32) + t = cp.array([0, 0, 0], dtype=cp.float32) + # compile kernel + sum_kernel_ = sum_kernel(0.9, 4, 4,) + # simulating adding the points + print("idx, valid, inside, values") + points_all[:, 0] = 1 + points_all[:, 1:3] = 1.0 + print("points all after ", points_all) + # run the kernel + sum_kernel_( + points_all, + R, + t, + pcl_ids, + layer_ids, + cp.array([points_all.shape[1], pcl_ids.shape[0]], dtype=cp.int32), + semantic_map, + new_map, + size=(points_all.shape[0] * pcl_ids.shape[0]), + ) + print("semantic_map", semantic_map) + print("new_map", new_map) + + +@pytest.mark.parametrize( + "map_shape, points_all,pcl_ids, layer_ids", + [ + ( + (4, 4, 4), + cp.array( + [[0.1, 0.1, 0.1, 0.3, 0.3], [0.1, 0.1, 0.1, 0.1, 0.2], [0.1, 0.1, 0.1, 0.1, 0.2]], dtype=cp.float32 + ), + cp.array([3, 4], dtype=cp.int32), + cp.array([1, 2], dtype=cp.int32), + ), + ], +) +def test_average_kernel(map_shape, points_all, pcl_ids, layer_ids): + elevation_map = cp.zeros((3, 4, 4), dtype=cp.float32) + elevation_map[2] = points_all.shape[0] + semantic_map = cp.zeros(map_shape, dtype=cp.float32) + new_map = cp.ones(map_shape, dtype=cp.float32) + R = cp.eye(3, dtype=cp.float32) + t = cp.array([0, 0, 0], dtype=cp.float32) + # compile kernel + average_kernel_ = average_kernel(4, 4,) + cell_n = 4 + print("new_map", new_map) + print("semantic_map", semantic_map) + print("elevation_map", elevation_map) + + average_kernel_( + new_map, + pcl_ids, + layer_ids, + cp.array([points_all.shape[1], pcl_ids.shape[0]], dtype=cp.int32), + elevation_map, + semantic_map, + size=(cell_n * cell_n * pcl_ids.shape[0]), + ) + print("semantic_map", semantic_map) + + +@pytest.mark.parametrize( + "map_shape, points_all,pcl_ids, layer_ids", + [ + ( + (3, 4, 4), + cp.array( + [[0.1, 0.1, 0.1, 0.3, 0.3], [0.1, 0.1, 0.1, 0.1, 0.2], [0.1, 0.1, 0.1, 0.1, 0.2]], dtype=cp.float32 + ), + cp.array([3], dtype=cp.int32), + cp.array([0], dtype=cp.int32), + ), + ], +) +def test_bayesian_inference_kernel(map_shape, points_all, pcl_ids, layer_ids): + # params + cell_n = 4 + resolution = 0.9 + elevation_map = cp.zeros((3, 4, 4), dtype=cp.float32) + elevation_map[2] = points_all.shape[0] + semantic_map = cp.zeros(map_shape, dtype=cp.float32) + new_map = cp.ones(map_shape, dtype=cp.float32) + R = cp.eye(3, dtype=cp.float32) + t = cp.array([0, 0, 0], dtype=cp.float32) + + # compile kernel + sum_mean = cp.ones((pcl_ids.shape[0], cell_n, cell_n,), cp.float32,) + sum_compact_kernel_ = sum_compact_kernel(resolution, cell_n, cell_n,) + bayesian_inference_kernel_ = bayesian_inference_kernel(cell_n, cell_n,) + # updatelayer + sum_mean *= 0 + sum_compact_kernel_( + points_all, + R, + t, + pcl_ids, + layer_ids, + cp.array([points_all.shape[1], pcl_ids.shape[0]], dtype=cp.int32), + sum_mean, + size=(points_all.shape[0] * pcl_ids.shape[0]), + ) + bayesian_inference_kernel_( + pcl_ids, + layer_ids, + cp.array([points_all.shape[1], pcl_ids.shape[0]], dtype=cp.int32), + elevation_map, + new_map, + sum_mean, + semantic_map, + size=(cell_n * cell_n * pcl_ids.shape[0]), + ) + print("semantic_map", semantic_map) + + +@pytest.mark.parametrize( + "map_shape, points_all,pcl_ids, layer_ids", + [ + ( + (4, 4, 4), + cp.array( + [[0.1, 0.1, 0.1, 0.3, 0.3], [0.1, 0.1, 0.1, 0.1, 0.2], [0.1, 0.1, 0.1, 0.1, 0.2]], dtype=cp.float32 + ), + cp.array([3, 4], dtype=cp.int32), + cp.array([1, 2], dtype=cp.int32), + ), + ], +) +def test_class_average_kernel(map_shape, points_all, pcl_ids, layer_ids): + # params + cell_n = 4 + resolution = 0.9 + 
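+    # average_weight weights new observations against the stored class average
+    # when blending (assumed semantics of class_average_kernel).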
average_weight = 0.5 + elevation_map = cp.zeros((3, 4, 4), dtype=cp.float32) + elevation_map[2] = 3 + semantic_map = cp.zeros(map_shape, dtype=cp.float32) + new_map = cp.zeros(map_shape, dtype=cp.float32) + R = cp.eye(3, dtype=cp.float32) + t = cp.array([0, 0, 0], dtype=cp.float32) + # compile kernel + sum_kernel_ = sum_kernel(0.9, 4, 4,) + class_average_kernel_ = class_average_kernel(cell_n, cell_n, average_weight,) + print("x,y,z,class") + print("points all after ", points_all) + + # simulating adding the points + print("idx, valid, inside, values") + points_all[:, 0] = 1 + points_all[:, 1:3] = 1.0 + print("points all after ", points_all) + # run the kernel + sum_kernel_( + points_all, + R, + t, + pcl_ids, + layer_ids, + cp.array([points_all.shape[1], pcl_ids.shape[0]], dtype=cp.int32), + semantic_map, + new_map, + size=(points_all.shape[0] * pcl_ids.shape[0]), + ) + print("new_map bef", new_map) + print("pcl_ids.shape[0]", pcl_ids.shape[0]) + class_average_kernel_( + new_map, + pcl_ids, + layer_ids, + cp.array([points_all.shape[1], pcl_ids.shape[0]], dtype=cp.int32), + elevation_map, + semantic_map, + size=(cell_n * cell_n * pcl_ids.shape[0]), + ) + print("semantic_map", semantic_map) + print("new_map", new_map) + + +@pytest.mark.parametrize( + "map_shape, points_all,pcl_ids, layer_ids", + [ + ( + (4, 4, 4), + cp.array( + [[0.1, 0.1, 0.1, 0.3, 0.3], [0.1, 0.1, 0.1, 0.1, 0.2], [0.1, 0.1, 0.1, 0.1, 0.2]], dtype=cp.float32 + ), + cp.array([3, 4], dtype=cp.int32), + cp.array([1, 2], dtype=cp.int32), + ), + ], +) +def test_class_bayesian_inference_fct(map_shape, points_all, pcl_ids, layer_ids): + # params + cell_n = 4 + resolution = 0.9 + elevation_map = cp.zeros((3, 4, 4), dtype=cp.float32) + elevation_map[2] = points_all.shape[0] + semantic_map = cp.zeros(map_shape, dtype=cp.float32) + new_map = cp.zeros(map_shape, dtype=cp.float32) + R = cp.eye(3, dtype=cp.float32) + t = cp.array([0, 0, 0], dtype=cp.float32) + # compile kernel + alpha_kernel_ = alpha_kernel(resolution, cell_n, cell_n,) + # simulating adding the points + print("idx, valid, inside, values") + points_all[:, 0] = 1 + points_all[:, 1:3] = 1.0 + print("points all after ", points_all) + # run the kernel + alpha_kernel_( + points_all, + pcl_ids, + layer_ids, + cp.array([points_all.shape[1], pcl_ids.shape[0]], dtype=cp.int32), + new_map, + size=(points_all.shape[0] * pcl_ids.shape[0]), + ) + # calculate new thetas + sum_alpha = cp.sum(new_map[layer_ids], axis=0) + # do not divide by zero + sum_alpha[sum_alpha == 0] = 1 + semantic_map[layer_ids] = new_map[layer_ids] / cp.expand_dims(sum_alpha, axis=0) + print("semantic_map", semantic_map) + print("new_map", new_map) diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/tests/test_semantic_map.py b/elevation_mapping_cupy/elevation_mapping_cupy/tests/test_semantic_map.py new file mode 100644 index 00000000..95966bc4 --- /dev/null +++ b/elevation_mapping_cupy/elevation_mapping_cupy/tests/test_semantic_map.py @@ -0,0 +1,47 @@ +import pytest +from elevation_mapping_cupy import semantic_map, parameter +import cupy as cp +import numpy as np + + +@pytest.fixture() +def semmap_ex(sem_lay, fusion_alg): + p = parameter.Parameter( + use_chainer=False, + weight_file="../../../config/weights.dat", + plugin_config_file="../../../config/plugin_config.yaml", + ) + for subs, value in p.subscriber_cfg.items(): + value["channels"] = sem_lay + value["fusion"] = fusion_alg + p.update() + e = semantic_map.SemanticMap(p) + return e + + +@pytest.mark.parametrize( + "sem_lay, fusion_alg,channels", + [ 
+ (["feat_0", "feat_1"], ["average", "average"], ["feat_0"]), + (["feat_0", "feat_1"], ["average", "average"], []), + (["feat_0", "feat_1", "rgb"], ["average", "average", "color"], ["rgb", "feat_0"],), + (["feat_0", "feat_1", "rgb"], ["class_average", "average", "color"], ["rgb", "feat_0"],), + (["feat_0", "feat_1", "rgb"], ["class_bayesian", "average", "color"], ["rgb", "feat_0"],), + (["feat_0", "feat_1", "rgb"], ["class_bayesian", "average", "color"], ["rgb", "feat_0", "feat_1"],), + (["feat_0", "feat_1", "rgb"], ["class_bayesian", "class_max", "color"], ["rgb", "feat_0", "feat_1"],), + (["max1", "max2", "rgb"], ["class_max", "class_max", "color"], ["rgb", "max1", "max2"],), + ], +) +def test_fusion_of_pcl(semmap_ex, channels): + fusion = semmap_ex.get_fusion_of_pcl(channels=channels) + assert len(fusion) <= len(channels) + assert len(fusion) > 0 or len(channels) == 0 + assert all(isinstance(item, str) for item in fusion) + + +@pytest.mark.parametrize( + "sem_lay, fusion_alg", [(["feat_0", "feat_1", "rgb"], ["average", "average", "color"]),], +) +@pytest.mark.parametrize("channels", [["rgb"], ["rgb", "feat_0"], []]) +def test_indices_fusion(semmap_ex, channels, fusion_alg): + pcl_indices, layer_indices = semmap_ex.get_indices_fusion(pcl_channels=channels, fusion_alg=fusion_alg[0]) diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/traversability_filter.py b/elevation_mapping_cupy/elevation_mapping_cupy/traversability_filter.py new file mode 100644 index 00000000..7dfb47c2 --- /dev/null +++ b/elevation_mapping_cupy/elevation_mapping_cupy/traversability_filter.py @@ -0,0 +1,100 @@ +# +# Copyright (c) 2022, Takahiro Miki. All rights reserved. +# Licensed under the MIT license. See LICENSE file in the project root for details. +# +import cupy as cp + + +def get_filter_torch(*args, **kwargs): + import torch + import torch.nn as nn + + class TraversabilityFilter(nn.Module): + def __init__(self, w1, w2, w3, w_out, device="cuda", use_bias=False): + super(TraversabilityFilter, self).__init__() + self.conv1 = nn.Conv2d(1, 4, 3, dilation=1, padding=0, bias=use_bias) + self.conv2 = nn.Conv2d(1, 4, 3, dilation=2, padding=0, bias=use_bias) + self.conv3 = nn.Conv2d(1, 4, 3, dilation=3, padding=0, bias=use_bias) + self.conv_out = nn.Conv2d(12, 1, 1, bias=use_bias) + + # Set weights. + self.conv1.weight = nn.Parameter(torch.from_numpy(w1).float()) + self.conv2.weight = nn.Parameter(torch.from_numpy(w2).float()) + self.conv3.weight = nn.Parameter(torch.from_numpy(w3).float()) + self.conv_out.weight = nn.Parameter(torch.from_numpy(w_out).float()) + + def __call__(self, elevation_cupy): + # Convert cupy tensor to pytorch. 
+ elevation_cupy = elevation_cupy.astype(cp.float32) + elevation = torch.as_tensor(elevation_cupy, device=self.conv1.weight.device) + + with torch.no_grad(): + out1 = self.conv1(elevation.view(-1, 1, elevation.shape[0], elevation.shape[1])) + out2 = self.conv2(elevation.view(-1, 1, elevation.shape[0], elevation.shape[1])) + out3 = self.conv3(elevation.view(-1, 1, elevation.shape[0], elevation.shape[1])) + + out1 = out1[:, :, 2:-2, 2:-2] + out2 = out2[:, :, 1:-1, 1:-1] + out = torch.cat((out1, out2, out3), dim=1) + # out = F.concat((out1, out2, out3), axis=1) + out = self.conv_out(out.abs()) + out = torch.exp(-out) + out_cupy = cp.asarray(out) + + return out_cupy + + traversability_filter = TraversabilityFilter(*args, **kwargs).cuda().eval() + return traversability_filter + + +def get_filter_chainer(*args, **kwargs): + import os + + os.environ["CHAINER_WARN_VERSION_MISMATCH"] = "0" + import chainer + import chainer.links as L + import chainer.functions as F + + class TraversabilityFilter(chainer.Chain): + def __init__(self, w1, w2, w3, w_out, use_cupy=True): + super(TraversabilityFilter, self).__init__() + self.conv1 = L.Convolution2D(1, 4, ksize=3, pad=0, dilate=1, nobias=True, initialW=w1) + self.conv2 = L.Convolution2D(1, 4, ksize=3, pad=0, dilate=2, nobias=True, initialW=w2) + self.conv3 = L.Convolution2D(1, 4, ksize=3, pad=0, dilate=3, nobias=True, initialW=w3) + self.conv_out = L.Convolution2D(12, 1, ksize=1, nobias=True, initialW=w_out) + + if use_cupy: + self.conv1.to_gpu() + self.conv2.to_gpu() + self.conv3.to_gpu() + self.conv_out.to_gpu() + chainer.config.train = False + chainer.config.enable_backprop = False + + def __call__(self, elevation): + out1 = self.conv1(elevation.reshape(-1, 1, elevation.shape[0], elevation.shape[1])) + out2 = self.conv2(elevation.reshape(-1, 1, elevation.shape[0], elevation.shape[1])) + out3 = self.conv3(elevation.reshape(-1, 1, elevation.shape[0], elevation.shape[1])) + + out1 = out1[:, :, 2:-2, 2:-2] + out2 = out2[:, :, 1:-1, 1:-1] + out = F.concat((out1, out2, out3), axis=1) + out = self.conv_out(F.absolute(out)) + return F.exp(-out).array + + traversability_filter = TraversabilityFilter(*args, **kwargs) + return traversability_filter + + +if __name__ == "__main__": + import cupy as cp + from parameter import Parameter + + elevation = cp.random.randn(202, 202, dtype=cp.float32) + print("elevation ", elevation.shape) + param = Parameter() + fc = get_filter_chainer(param.w1, param.w2, param.w3, param.w_out) + print("chainer ", fc(elevation)) + + ft = get_filter_torch(param.w1, param.w2, param.w3, param.w_out) + print("torch ", ft(elevation)) diff --git a/elevation_mapping_cupy/elevation_mapping_cupy/traversability_polygon.py b/elevation_mapping_cupy/elevation_mapping_cupy/traversability_polygon.py new file mode 100644 index 00000000..be874f1b --- /dev/null +++ b/elevation_mapping_cupy/elevation_mapping_cupy/traversability_polygon.py @@ -0,0 +1,77 @@ +# +# Copyright (c) 2022, Takahiro Miki. All rights reserved. +# Licensed under the MIT license. See LICENSE file in the project root for details. 
+# +import numpy as np +import cupy as cp +from shapely.geometry import Polygon, MultiPoint + + +def get_masked_traversability(map_array, mask, traversability): + traversability = traversability[1:-1, 1:-1] + is_valid = map_array[2][1:-1, 1:-1] + mask = mask[1:-1, 1:-1] + + # invalid place is 0 traversability value + untraversability = cp.where(is_valid > 0.5, 1 - traversability, 0) + masked = untraversability * mask + masked_isvalid = is_valid * mask + return masked, masked_isvalid + + +def is_traversable(masked_untraversability, thresh, min_thresh, max_over_n): + untraversable_thresh = 1 - thresh + max_thresh = 1 - min_thresh + over_thresh = cp.where(masked_untraversability > untraversable_thresh, 1, 0) + polygon = calculate_untraversable_polygon(over_thresh) + max_untraversability = masked_untraversability.max() + if over_thresh.sum() > max_over_n: + is_safe = False + elif max_untraversability > max_thresh: + is_safe = False + else: + is_safe = True + return is_safe, polygon + + +def calculate_area(polygon): + area = 0 + for i in range(len(polygon)): + p1 = polygon[i - 1] + p2 = polygon[i] + area += (p1[0] * p2[1] - p1[1] * p2[0]) / 2.0 + return abs(area) + + +def calculate_untraversable_polygon(over_thresh): + x, y = cp.where(over_thresh > 0.5) + points = cp.stack([x, y]).T + convex_hull = MultiPoint(points.get()).convex_hull + if convex_hull.is_empty or convex_hull.geom_type == "Point" or convex_hull.geom_type == "LineString": + return None + else: + return cp.array(convex_hull.exterior.coords) + + +def transform_to_map_position(polygon, center, cell_n, resolution): + polygon = center.reshape(1, 2) + (polygon - cell_n / 2.0) * resolution + return polygon + + +def transform_to_map_index(points, center, cell_n, resolution): + indices = ((points - center.reshape(1, 2)) / resolution + cell_n / 2).astype(cp.int32) + return indices + + +if __name__ == "__main__": + polygon = [[0, 0], [2, 0], [0, 2]] + print(calculate_area(polygon)) + + under_thresh = cp.zeros((20, 20)) + # under_thresh[10:12, 8:10] = 1.0 + under_thresh[14:18, 8:10] = 1.0 + under_thresh[1:8, 2:9] = 1.0 + print(under_thresh) + polygon = calculate_untraversable_polygon(under_thresh) + print(polygon) + transform_to_map_position(polygon, cp.array([0.5, 1.0]), 6.0, 0.05) diff --git a/elevation_mapping_cupy/include/elevation_mapping_cupy/elevation_mapping_ros.hpp b/elevation_mapping_cupy/include/elevation_mapping_cupy/elevation_mapping_ros.hpp new file mode 100644 index 00000000..ebdd016e --- /dev/null +++ b/elevation_mapping_cupy/include/elevation_mapping_cupy/elevation_mapping_ros.hpp @@ -0,0 +1,243 @@ +// +// Copyright (c) 2022, Takahiro Miki. All rights reserved. +// Licensed under the MIT license. See LICENSE file in the project root for details. 
+// + +#pragma once + +// STL +#include +#include + +// Eigen +#include + +// Pybind +#include // everything needed for embedding +#include +// ROS2 +#include +#include +#include +#include +#include "message_filters/subscriber.h" +#include "message_filters/sync_policies/approximate_time.h" +#include "message_filters/synchronizer.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +// Grid Map +#include +#include +#include + +// PCL +#include +#include +#include +#include + +// OpenCV +#include +#include + +#include +#include +#include +#include + +#include "elevation_mapping_cupy/elevation_mapping_wrapper.hpp" + +namespace py = pybind11; + +namespace elevation_mapping_cupy { + +class ElevationMappingNode : public rclcpp::Node { + public: + ElevationMappingNode(const rclcpp::NodeOptions& options); + using RowMatrixXd = Eigen::Matrix; + using ColMatrixXf = Eigen::Matrix; + + // using ImageSubscriber = image_transport::SubscriberFilter; + using ImageSubscriber = message_filters::Subscriber; + using ImageSubscriberPtr = std::shared_ptr; + + // Subscriber and Synchronizer for CameraInfo messages + using CameraInfoSubscriber = message_filters::Subscriber; + using CameraInfoSubscriberPtr = std::shared_ptr; + using CameraPolicy = message_filters::sync_policies::ApproximateTime; + using CameraSync = message_filters::Synchronizer; + using CameraSyncPtr = std::shared_ptr; + + // Subscriber and Synchronizer for ChannelInfo messages + using ChannelInfoSubscriber = message_filters::Subscriber; + using ChannelInfoSubscriberPtr = std::shared_ptr; + using CameraChannelPolicy = message_filters::sync_policies::ApproximateTime; + using CameraChannelSync = message_filters::Synchronizer; + using CameraChannelSyncPtr = std::shared_ptr; + + // Subscriber and Synchronizer for Pointcloud messages + // using PointCloudSubscriber = message_filters::Subscriber; + // using PointCloudSubscriberPtr = std::shared_ptr; + // using PointCloudPolicy = message_filters::sync_policies::ApproximateTime; + // using PointCloudSync = message_filters::Synchronizer; + // using PointCloudSyncPtr = std::shared_ptr; + + private: +void readParameters(); +void setupMapPublishers(); +void pointcloudCallback(const sensor_msgs::msg::PointCloud2::SharedPtr cloud, const std::string& key); +void pointcloudtransportCallback(const sensor_msgs::msg::PointCloud2::ConstSharedPtr cloud, const std::string& key); + +void inputPointCloud(const sensor_msgs::msg::PointCloud2::ConstSharedPtr cloud, const std::vector& channels); + +void inputImage(const sensor_msgs::msg::Image::ConstSharedPtr& image_msg, + const sensor_msgs::msg::CameraInfo::ConstSharedPtr& camera_info_msg, + const std::vector& channels); + + +// void imageCallback(const sensor_msgs::msg::Image::SharedPtr image_msg, const sensor_msgs::msg::CameraInfo::SharedPtr camera_info_msg, const std::string& key); +// void imageChannelCallback(const sensor_msgs::msg::Image::SharedPtr image_msg, const sensor_msgs::msg::CameraInfo::SharedPtr camera_info_msg, const elevation_map_msgs::msg::ChannelInfo::SharedPtr channel_info_msg); +void imageCallback(const sensor_msgs::msg::Image::SharedPtr cloud, const std::string& key); +void imageChannelCallback(const elevation_map_msgs::msg::ChannelInfo::SharedPtr chennel_info, const std::string& key); +void imageInfoCallback(const sensor_msgs::msg::CameraInfo::SharedPtr image_info, const std::string& key); +// void pointCloudChannelCallback(const sensor_msgs::PointCloud2& 
cloud, const elevation_map_msgs::ChannelInfoConstPtr& channel_info_msg); +// // void multiLayerImageCallback(const elevation_map_msgs::MultiLayerImageConstPtr& image_msg, const sensor_msgs::CameraInfoConstPtr& camera_info_msg); +void publishAsPointCloud(const grid_map::GridMap& map) const; +bool getSubmap( const std::shared_ptr request, std::shared_ptr response); + + +// bool checkSafety(elevation_map_msgs::CheckSafety::Request& request, elevation_map_msgs::CheckSafety::Response& response); + +void initializeMap(const std::shared_ptr request, std::shared_ptr response); +void clearMap(const std::shared_ptr request, std::shared_ptr response); +void clearMapWithInitializer(const std::shared_ptr request, std::shared_ptr response); + + + +void setPublishPoint(const std::shared_ptr request, + std::shared_ptr response); + + +void updateVariance(); +void updateTime(); +void updatePose(); +void updateGridMap(); +void publishStatistics(); + +void publishNormalAsArrow(const grid_map::GridMap& map) const; +void initializeWithTF(); +void publishMapToOdom(double error); +void publishMapOfIndex(int index); +visualization_msgs::msg::Marker vectorToArrowMarker(const Eigen::Vector3d& start, const Eigen::Vector3d& end, const int id) const; + + rclcpp::Node::SharedPtr node_; + // image_transport::ImageTransport it_; + std::vector::SharedPtr> pointcloudSubs_; + + // std::vector::SharedPtr> pointcloudtransportSubs_; + std::vector pointcloudtransportSubs_; + + std::vector::SharedPtr> imageSubs_; + std::vector::SharedPtr> cameraInfoSubs_; + std::vector::SharedPtr> channelInfoSubs_; + + + // std::vector imageSubs_; + // std::vector cameraInfoSubs_; + // std::vector channelInfoSubs_; + // std::vector cameraSyncs_; + // std::vector cameraChannelSyncs_; + // std::vector pointCloudSyncs_; + std::vector::SharedPtr> mapPubs_; + + + std::shared_ptr tfBroadcaster_; + std::shared_ptr tfListener_; + std::shared_ptr tfBuffer_; + + + rclcpp::Publisher::SharedPtr alivePub_; + rclcpp::Publisher::SharedPtr pointPub_; + rclcpp::Publisher::SharedPtr normalPub_; + rclcpp::Publisher::SharedPtr statisticsPub_; + rclcpp::Service::SharedPtr rawSubmapService_; + rclcpp::Service::SharedPtr clearMapService_; + rclcpp::Service::SharedPtr clearMapWithInitializerService_; + rclcpp::Service::SharedPtr initializeMapService_; + rclcpp::Service::SharedPtr setPublishPointService_; + rclcpp::Service::SharedPtr checkSafetyService_; + rclcpp::TimerBase::SharedPtr updateVarianceTimer_; + rclcpp::TimerBase::SharedPtr updateTimeTimer_; + rclcpp::TimerBase::SharedPtr updatePoseTimer_; + rclcpp::TimerBase::SharedPtr updateGridMapTimer_; + rclcpp::TimerBase::SharedPtr publishStatisticsTimer_; + rclcpp::Time lastStatisticsPublishedTime_; + + + std::shared_ptr map_; + // ElevationMappingWrapper map_; + std::string mapFrameId_; + std::string correctedMapFrameId_; + std::string baseFrameId_; + + + // map topics info + std::vector> map_topics_; + std::vector> map_layers_; + std::vector> map_basic_layers_; + std::set map_layers_all_; + std::set map_layers_sync_; + std::vector map_fps_; + std::set map_fps_unique_; + std::vector mapTimers_; + std::map> channels_; + + std::vector initialize_frame_id_; + std::vector initialize_tf_offset_; + std::string initializeMethod_; + + Eigen::Vector3d lowpassPosition_; + Eigen::Vector4d lowpassOrientation_; + + std::mutex mapMutex_; // protects gridMap_ + grid_map::GridMap gridMap_; + std::atomic_bool isGridmapUpdated_; // needs to be atomic (read is not protected by mapMutex_) + + std::mutex errorMutex_; // protects 
positionError_, and orientationError_ + double positionError_; + double orientationError_; + + double positionAlpha_; + double orientationAlpha_; + double voxel_filter_size_; + pcl::VoxelGrid voxel_filter; + + double recordableFps_; + std::atomic_bool enablePointCloudPublishing_; + bool enableNormalArrowPublishing_; + bool enableDriftCorrectedTFPublishing_; + bool useInitializerAtStart_; + double initializeTfGridSize_; + bool alwaysClearWithInitializer_; + std::atomic_int pointCloudProcessCounter_; + + std::map> imageInfoReady_; + std::map> imageChannelReady_; +}; + +} // namespace elevation_mapping_cupy diff --git a/elevation_mapping_cupy/include/elevation_mapping_cupy/elevation_mapping_wrapper.hpp b/elevation_mapping_cupy/include/elevation_mapping_cupy/elevation_mapping_wrapper.hpp new file mode 100644 index 00000000..f0090254 --- /dev/null +++ b/elevation_mapping_cupy/include/elevation_mapping_cupy/elevation_mapping_wrapper.hpp @@ -0,0 +1,83 @@ +// +// Copyright (c) 2022, Takahiro Miki. All rights reserved. +// Licensed under the MIT license. See LICENSE file in the project root for details. +// + +#pragma once + +// STL +#include + +// Eigen +#include + +// Pybind +#include // everything needed for embedding +#include + +// ROS2 +#include +#include +#include +#include +#include +#include + +// Grid Map +#include +#include +#include + +// PCL +#include +#include +#include + +namespace py = pybind11; + +namespace elevation_mapping_cupy { + +std::vector extract_unique_names(const std::map& subscriber_params); + +class ElevationMappingWrapper { + public: + using RowMatrixXd = Eigen::Matrix; + using RowMatrixXf = Eigen::Matrix; + using ColMatrixXf = Eigen::Matrix; + + ElevationMappingWrapper(); + + void initialize(const std::shared_ptr& node); + + void input(const RowMatrixXd& points, const std::vector& channels, const RowMatrixXd& R, const Eigen::VectorXd& t, + const double positionNoise, const double orientationNoise); + void input_image(const std::vector& multichannel_image, const std::vector& channels, const RowMatrixXd& R, + const Eigen::VectorXd& t, const RowMatrixXd& cameraMatrix, const Eigen::VectorXd& D, const std::string distortion_model, int height, int width); + void move_to(const Eigen::VectorXd& p, const RowMatrixXd& R); + void clear(); + void update_variance(); + void update_time(); + bool exists_layer(const std::string& layerName); + void get_layer_data(const std::string& layerName, RowMatrixXf& map); + void get_grid_map(grid_map::GridMap& gridMap, const std::vector& layerNames); + void get_polygon_traversability(std::vector& polygon, Eigen::Vector3d& result, + std::vector& untraversable_polygon); + double get_additive_mean_error(); + void initializeWithPoints(std::vector& points, std::string method); + void addNormalColorLayer(grid_map::GridMap& map); + + private: + + std::shared_ptr node_; + void setParameters(); + + py::object map_; + py::object param_; + double resolution_; + double map_length_; + int map_n_; + bool enable_normal_; + bool enable_normal_color_; +}; + +} // namespace elevation_mapping_cupy diff --git a/elevation_mapping_cupy/launch/anymal.launch b/elevation_mapping_cupy/launch/anymal.launch new file mode 100644 index 00000000..67c2f147 --- /dev/null +++ b/elevation_mapping_cupy/launch/anymal.launch @@ -0,0 +1,7 @@ + + + + + + + diff --git a/elevation_mapping_cupy/launch/anymal.launch.py b/elevation_mapping_cupy/launch/anymal.launch.py new file mode 100644 index 00000000..5f3ea731 --- /dev/null +++ b/elevation_mapping_cupy/launch/anymal.launch.py @@ -0,0 
+1,34 @@ +from launch import LaunchDescription +from launch_ros.actions import Node +from launch.substitutions import PathJoinSubstitution +from ament_index_python.packages import get_package_share_directory + + +def generate_launch_description(): + elevation_mapping_cupy_dir = get_package_share_directory('elevation_mapping_cupy') + + return LaunchDescription([ + # Elevation Mapping Node + Node( + package='elevation_mapping_cupy', + executable='elevation_mapping_node', + name='elevation_mapping', + parameters=[ + PathJoinSubstitution([ + elevation_mapping_cupy_dir, + 'config', + 'setups', + 'anymal', + 'anymal_parameters.yaml' + ]), + PathJoinSubstitution([ + elevation_mapping_cupy_dir, + 'config', + 'setups', + 'anymal', + 'anymal_sensor_parameter.yaml' + ]) + ], + output='screen' + ) + ]) \ No newline at end of file diff --git a/elevation_mapping_cupy/launch/elevation_mapping.launch.py b/elevation_mapping_cupy/launch/elevation_mapping.launch.py new file mode 100755 index 00000000..cb418578 --- /dev/null +++ b/elevation_mapping_cupy/launch/elevation_mapping.launch.py @@ -0,0 +1,88 @@ +import os +from launch import LaunchDescription +from launch_ros.actions import Node +from ament_index_python.packages import get_package_share_directory +from launch.actions import DeclareLaunchArgument +from launch.substitutions import LaunchConfiguration, PathJoinSubstitution +from launch.conditions import IfCondition + + +def generate_launch_description(): + package_name = 'elevation_mapping_cupy' + share_dir = get_package_share_directory(package_name) + + # Define paths + core_param_path = os.path.join( + share_dir, 'config', 'core', 'core_param.yaml') + + # Declare launch arguments + robot_param_arg = DeclareLaunchArgument( + 'robot_config', + # default_value='turtle_bot/turle_bot_simple.yaml', + default_value='menzi/base.yaml', + description='Name of the robot-specific config file within ' + 'config/setups/' + ) + + launch_rviz_arg = DeclareLaunchArgument( + 'launch_rviz', + default_value='false', + description='Whether to launch RViz' + ) + + rviz_config_arg = DeclareLaunchArgument( + 'rviz_config', + default_value='', + description='Path to the RViz config file' + ) + + use_sim_time_arg = DeclareLaunchArgument( + 'use_sim_time', + default_value='false', + description='Use simulation clock if true' + ) + + # Get launch configurations + robot_config = LaunchConfiguration('robot_config') + robot_param_path = PathJoinSubstitution( + [share_dir, 'config', 'setups', robot_config]) + launch_rviz = LaunchConfiguration('launch_rviz') + rviz_config = LaunchConfiguration('rviz_config') + use_sim_time = LaunchConfiguration('use_sim_time') + + # Verify core config exists + if not os.path.exists(core_param_path): + raise FileNotFoundError( + f"Config file {core_param_path} does not exist") + + # Define nodes + elevation_mapping_node = Node( + package=package_name, + executable='elevation_mapping_node.py', + name='elevation_mapping_node', + output='screen', + parameters=[ + core_param_path, + robot_param_path, + {'use_sim_time': use_sim_time} + ] + ) + + rviz_node = Node( + package='rviz2', + executable='rviz2', + name='rviz2', + arguments=['-d', rviz_config], + parameters=[{'use_sim_time': use_sim_time}], + output='screen', + condition=IfCondition(launch_rviz) + ) + + return LaunchDescription([ + robot_param_arg, + launch_rviz_arg, + rviz_config_arg, + use_sim_time_arg, + elevation_mapping_node, + rviz_node + ]) \ No newline at end of file diff --git a/elevation_mapping_cupy/launch/elevation_mapping_cupy.launch 
b/elevation_mapping_cupy/launch/elevation_mapping_cupy.launch new file mode 100644 index 00000000..b0fb53c9 --- /dev/null +++ b/elevation_mapping_cupy/launch/elevation_mapping_cupy.launch @@ -0,0 +1,7 @@ + + + + + + + \ No newline at end of file diff --git a/elevation_mapping_cupy/launch/elevation_mapping_cupy.launch.py b/elevation_mapping_cupy/launch/elevation_mapping_cupy.launch.py new file mode 100644 index 00000000..ac46fb70 --- /dev/null +++ b/elevation_mapping_cupy/launch/elevation_mapping_cupy.launch.py @@ -0,0 +1,32 @@ +from launch import LaunchDescription +from launch_ros.actions import Node +from launch.substitutions import PathJoinSubstitution +from ament_index_python.packages import get_package_share_directory +import launch_ros.actions + +def generate_launch_description(): + elevation_mapping_cupy_dir = get_package_share_directory('elevation_mapping_cupy') + + return LaunchDescription([ + launch_ros.actions.SetParameter(name='use_sim_time', value=True), + Node( + package='elevation_mapping_cupy', + executable='elevation_mapping_node.py', + name='elevation_mapping', + parameters=[ + PathJoinSubstitution([ + elevation_mapping_cupy_dir, + 'config', + 'core', + 'core_param.yaml' + ]), + PathJoinSubstitution([ + elevation_mapping_cupy_dir, + 'config', + 'core', + 'example_setup.yaml' + ]) + ], + output='screen' + ) + ]) \ No newline at end of file diff --git a/elevation_mapping_cupy/launch/elevation_mapping_turtle.launch.py b/elevation_mapping_cupy/launch/elevation_mapping_turtle.launch.py new file mode 100644 index 00000000..92ac9d72 --- /dev/null +++ b/elevation_mapping_cupy/launch/elevation_mapping_turtle.launch.py @@ -0,0 +1,92 @@ +import os +from launch import LaunchDescription +from launch_ros.actions import Node +from ament_index_python.packages import get_package_share_directory +from launch.actions import DeclareLaunchArgument +from launch.substitutions import LaunchConfiguration, PathJoinSubstitution, PythonExpression +from launch.conditions import IfCondition, UnlessCondition +from launch_ros.descriptions import ParameterFile + + +def generate_launch_description(): + package_name = 'elevation_mapping_cupy' + share_dir = get_package_share_directory(package_name) + + core_param_path = os.path.join(share_dir, 'config', 'core', 'core_param.yaml') + turtle_param_path = os.path.join(share_dir, 'config', 'setups', 'turtle_bot', 'turtle_bot_simple.yaml') + + # Add verification + if not os.path.exists(core_param_path): + raise FileNotFoundError(f"Core param file not found: {core_param_path}") + if not os.path.exists(turtle_param_path): + raise FileNotFoundError(f"Turtle param file not found: {turtle_param_path}") + + # Declare launch arguments + use_sim_time_arg = DeclareLaunchArgument( + 'use_sim_time', + default_value='true', + description='Use simulation (Gazebo) clock if true' + ) + use_sim_time = LaunchConfiguration('use_sim_time') + + rviz_config_arg = DeclareLaunchArgument( + 'rviz_config', + default_value=PathJoinSubstitution([ + share_dir, 'rviz', 'turtle_sim_laser.rviz' + ]), + description='Path to the RViz config file' + ) + rviz_config = LaunchConfiguration('rviz_config') + + use_python_node_arg = DeclareLaunchArgument( + 'use_python_node', + default_value='false', + description='Use the Python node if true' + ) + use_python_node = LaunchConfiguration('use_python_node') + + # RViz Node + rviz_node = Node( + package='rviz2', + executable='rviz2', + name='rviz2', + arguments=['-d', rviz_config], + parameters=[{'use_sim_time': use_sim_time}], + output='screen' + ) + 
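    # [Editor's note, not part of the diff] The two Node actions below are a
    # launch-time toggle: IfCondition and UnlessCondition evaluate the same
    # 'use_python_node' argument, so exactly one backend starts. IfCondition
    # accepts any substitution that resolves to 'true'/'false', which is why the
    # plain string default declared above works without wrapping it in
    # PythonExpression. Hypothetical invocation with the toggle set:
    #
    #   ros2 launch elevation_mapping_cupy elevation_mapping_turtle.launch.py use_python_node:=true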
elevation_mapping_node_py = Node( + package='elevation_mapping_cupy', + executable='elevation_mapping_node.py', + name='elevation_mapping_node', + output='screen', + parameters=[ + ParameterFile(core_param_path, allow_substs=True), + turtle_param_path, + {'use_sim_time': use_sim_time} + ], + condition=IfCondition(use_python_node) + # condition=IfCondition(PythonExpression(use_python_node)) + ) + + elevation_mapping_node = Node( + package='elevation_mapping_cupy', + executable='elevation_mapping_node', + name='elevation_mapping_node', + output='screen', + parameters=[ + ParameterFile(core_param_path, allow_substs=True), + turtle_param_path, + {'use_sim_time': use_sim_time} + ], + condition=UnlessCondition(use_python_node) + # condition=UnlessCondition(PythonExpression(use_python_node)) + ) + + return LaunchDescription([ + use_sim_time_arg, + rviz_config_arg, + use_python_node_arg, + elevation_mapping_node_py, + elevation_mapping_node, + rviz_node + ]) diff --git a/elevation_mapping_cupy/launch/menzi.launch b/elevation_mapping_cupy/launch/menzi.launch new file mode 100644 index 00000000..e69de29b diff --git a/elevation_mapping_cupy/launch/turtlesim_init.launch.py b/elevation_mapping_cupy/launch/turtlesim_init.launch.py new file mode 100644 index 00000000..90530e4b --- /dev/null +++ b/elevation_mapping_cupy/launch/turtlesim_init.launch.py @@ -0,0 +1,167 @@ +# turtlesim_init.launch.py + +import os + +from launch import LaunchDescription +from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription, ExecuteProcess +from launch.substitutions import LaunchConfiguration, Command, PathJoinSubstitution, TextSubstitution +from launch.launch_description_sources import PythonLaunchDescriptionSource +from launch_ros.actions import Node +from ament_index_python.packages import get_package_share_directory + + +def generate_launch_description(): + # Get the package directories + try: + elevation_mapping_cupy_dir = get_package_share_directory('elevation_mapping_cupy') + gazebo_ros_dir = get_package_share_directory('gazebo_ros') + turtlebot3_gazebo_dir = get_package_share_directory('turtlebot3_gazebo') + turtlebot3_description_dir = get_package_share_directory('turtlebot3_description') + except Exception as e: + print(f"Error getting package directories: {e}") + return LaunchDescription() # Return an empty launch description to prevent further errors + + # Print debug information + print(f"elevation_mapping_cupy_dir: {elevation_mapping_cupy_dir}") + print(f"gazebo_ros_dir: {gazebo_ros_dir}") + print(f"turtlebot3_gazebo_dir: {turtlebot3_gazebo_dir}") + print(f"turtlebot3_description_dir: {turtlebot3_description_dir}") + + # Declare launch arguments + use_sim_time_arg = DeclareLaunchArgument( + 'use_sim_time', + default_value='true', + description='Use simulation (Gazebo) clock if true' + ) + + rviz_config_arg = DeclareLaunchArgument( + 'rviz_config', + default_value=PathJoinSubstitution([ + elevation_mapping_cupy_dir, 'rviz', 'turtle_sim_laser.rviz' + ]), + description='Path to the RViz config file' + ) + + # print rviz config used + print(f"rviz_config: {rviz_config_arg}") + + model_arg = DeclareLaunchArgument( + 'model', + default_value='waffle', + description='Model type [burger, waffle, waffle_pi]' + ) + + x_pos_arg = DeclareLaunchArgument( + 'x_pos', + default_value='0.0', + description='Initial X position of the robot' + ) + + y_pos_arg = DeclareLaunchArgument( + 'y_pos', + default_value='2.0', + description='Initial Y position of the robot' + ) + + z_pos_arg = DeclareLaunchArgument( + 
'z_pos', + default_value='0.0', + description='Initial Z position of the robot' + ) + + # Launch configurations + use_sim_time = LaunchConfiguration('use_sim_time') + # rviz_config = LaunchConfiguration('rviz_config') + model = LaunchConfiguration('model') + x_pos = LaunchConfiguration('x_pos') + y_pos = LaunchConfiguration('y_pos') + z_pos = LaunchConfiguration('z_pos') + + # Set the /use_sim_time parameter + use_sim_time_param = Node( + package='rclcpp_components', + executable='parameter_server', + name='use_sim_time_param', + parameters=[{'use_sim_time': use_sim_time}] + ) + + # Include the Gazebo empty world launch file + gazebo_launch = IncludeLaunchDescription( + PythonLaunchDescriptionSource( + PathJoinSubstitution([ + gazebo_ros_dir, 'launch', 'gazebo.launch.py' + ]) + ), + launch_arguments={ + 'world': PathJoinSubstitution([ + turtlebot3_gazebo_dir, 'worlds', 'turtlebot3_world.world' + ]), + 'paused': 'false', + 'use_sim_time': use_sim_time, + 'gui': 'true', + 'headless': 'false', + 'debug': 'false' + }.items() + ) + + # Generate robot_description from urdf + robot_description_content = Command([ + 'cat ', + PathJoinSubstitution([ + turtlebot3_description_dir, 'urdf', 'turtlebot3_waffle.urdf' + ]) + ]) + + robot_description = {'robot_description': robot_description_content} + + # Robot State Publisher + robot_state_publisher_node = Node( + package='robot_state_publisher', + executable='robot_state_publisher', + name='waffle_state_publisher', + output='screen', + parameters=[ + robot_description, + { + 'use_sim_time': use_sim_time, + 'publish_frequency': 50.0 # Set publishing frequency to 50 Hz + } + ] + ) + + # Spawn the TurtleBot3 model in Gazebo + spawn_entity = Node( + package='gazebo_ros', + executable='spawn_entity.py', + arguments=[ + '-entity', PathJoinSubstitution([ + TextSubstitution(text='turtlebot3_'), + model + ]), + '-topic', 'robot_description', + '-x', x_pos, + '-y', y_pos, + '-z', z_pos + ], + output='screen' + ) + + # Define LaunchDescription variable + ld = LaunchDescription() + + # Add the declared arguments + ld.add_action(use_sim_time_arg) + # ld.add_action(rviz_config_arg) + ld.add_action(model_arg) + ld.add_action(x_pos_arg) + ld.add_action(y_pos_arg) + ld.add_action(z_pos_arg) + + # Add actions + ld.add_action(gazebo_launch) + # ld.add_action(joint_state_publisher_node) + ld.add_action(robot_state_publisher_node) + ld.add_action(spawn_entity) + # ld.add_action(rviz_node) + + return ld \ No newline at end of file diff --git a/elevation_mapping_cupy/launch/turtlesim_plane_decomposition_example.launch b/elevation_mapping_cupy/launch/turtlesim_plane_decomposition_example.launch new file mode 100644 index 00000000..09bfb7c9 --- /dev/null +++ b/elevation_mapping_cupy/launch/turtlesim_plane_decomposition_example.launch @@ -0,0 +1,13 @@ + + + + + + + + + + + + + diff --git a/elevation_mapping_cupy/launch/turtlesim_plane_decomposition_example.launch.py b/elevation_mapping_cupy/launch/turtlesim_plane_decomposition_example.launch.py new file mode 100644 index 00000000..b1e8a178 --- /dev/null +++ b/elevation_mapping_cupy/launch/turtlesim_plane_decomposition_example.launch.py @@ -0,0 +1,41 @@ +from launch import LaunchDescription +from launch.actions import IncludeLaunchDescription +from launch.launch_description_sources import PythonLaunchDescriptionSource +from launch.substitutions import PathJoinSubstitution +from ament_index_python.packages import get_package_share_directory + + +def generate_launch_description(): + elevation_mapping_cupy_dir = 
get_package_share_directory('elevation_mapping_cupy') + convex_plane_decomposition_dir = get_package_share_directory('convex_plane_decomposition_ros') + + return LaunchDescription([ + # Launch elevation mapping turtle sim + IncludeLaunchDescription( + PythonLaunchDescriptionSource( + PathJoinSubstitution([ + elevation_mapping_cupy_dir, + 'launch', + 'turtlesim_simple_example.launch.py' + ]) + ), + launch_arguments={ + 'rviz_config': PathJoinSubstitution([ + elevation_mapping_cupy_dir, + 'rviz', + 'turtle_segmentation_example.rviz' + ]) + }.items() + ), + + # Launch the plane decomposition node + IncludeLaunchDescription( + PythonLaunchDescriptionSource( + PathJoinSubstitution([ + convex_plane_decomposition_dir, + 'launch', + 'convex_plane_decomposition.launch.py' + ]) + ) + ) + ]) \ No newline at end of file diff --git a/elevation_mapping_cupy/launch/turtlesim_semantic_image_example.launch b/elevation_mapping_cupy/launch/turtlesim_semantic_image_example.launch new file mode 100644 index 00000000..05064d0a --- /dev/null +++ b/elevation_mapping_cupy/launch/turtlesim_semantic_image_example.launch @@ -0,0 +1,19 @@ + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/elevation_mapping_cupy/launch/turtlesim_semantic_image_example.launch.py b/elevation_mapping_cupy/launch/turtlesim_semantic_image_example.launch.py new file mode 100644 index 00000000..bcd2440e --- /dev/null +++ b/elevation_mapping_cupy/launch/turtlesim_semantic_image_example.launch.py @@ -0,0 +1,68 @@ +from launch import LaunchDescription +from launch.actions import IncludeLaunchDescription +from launch.launch_description_sources import PythonLaunchDescriptionSource +from launch_ros.actions import Node +from launch.substitutions import PathJoinSubstitution +from ament_index_python.packages import get_package_share_directory + + +def generate_launch_description(): + elevation_mapping_cupy_dir = get_package_share_directory('elevation_mapping_cupy') + semantic_sensor_dir = get_package_share_directory('semantic_sensor') + + return LaunchDescription([ + # Include the turtlesim_init launch file + IncludeLaunchDescription( + PythonLaunchDescriptionSource( + PathJoinSubstitution([ + elevation_mapping_cupy_dir, + 'launch', + 'turtlesim_init.launch.py' + ]) + ), + launch_arguments={ + 'rviz_config': PathJoinSubstitution([ + elevation_mapping_cupy_dir, + 'rviz', + 'turtle_example.rviz' + ]) + }.items() + ), + + # Semantic Sensor Image Node + Node( + package='semantic_sensor', + executable='image_node.py', + name='front_cam', + arguments=['front_cam_image'], + parameters=[PathJoinSubstitution([ + semantic_sensor_dir, + 'config', + 'sensor_parameter.yaml' + ])], + output='screen' + ), + + # Elevation Mapping Node + Node( + package='elevation_mapping_cupy', + executable='elevation_mapping_node', + name='elevation_mapping', + parameters=[ + PathJoinSubstitution([ + elevation_mapping_cupy_dir, + 'config', + 'core', + 'core_param.yaml' + ]), + PathJoinSubstitution([ + elevation_mapping_cupy_dir, + 'config', + 'setups', + 'turtle_bot', + 'turtle_bot_semantics_image.yaml' + ]) + ], + output='screen' + ) + ]) \ No newline at end of file diff --git a/elevation_mapping_cupy/launch/turtlesim_semantic_pointcloud_example.launch b/elevation_mapping_cupy/launch/turtlesim_semantic_pointcloud_example.launch new file mode 100644 index 00000000..a8286746 --- /dev/null +++ b/elevation_mapping_cupy/launch/turtlesim_semantic_pointcloud_example.launch @@ -0,0 +1,18 @@ + + + + + + + + + + + + + + + + + + diff --git 
a/elevation_mapping_cupy/launch/turtlesim_semantic_pointcloud_example.launch.py b/elevation_mapping_cupy/launch/turtlesim_semantic_pointcloud_example.launch.py new file mode 100644 index 00000000..68da1935 --- /dev/null +++ b/elevation_mapping_cupy/launch/turtlesim_semantic_pointcloud_example.launch.py @@ -0,0 +1,68 @@ +from launch import LaunchDescription +from launch.actions import IncludeLaunchDescription +from launch.launch_description_sources import PythonLaunchDescriptionSource +from launch_ros.actions import Node +from launch.substitutions import PathJoinSubstitution +from ament_index_python.packages import get_package_share_directory + + +def generate_launch_description(): + elevation_mapping_cupy_dir = get_package_share_directory('elevation_mapping_cupy') + semantic_sensor_dir = get_package_share_directory('semantic_sensor') + + return LaunchDescription([ + # Include the turtlesim_init launch file + IncludeLaunchDescription( + PythonLaunchDescriptionSource( + PathJoinSubstitution([ + elevation_mapping_cupy_dir, + 'launch', + 'turtlesim_init.launch.py' + ]) + ), + launch_arguments={ + 'rviz_config': PathJoinSubstitution([ + elevation_mapping_cupy_dir, + 'rviz', + 'turtle_segmentation_example.rviz' + ]) + }.items() + ), + + # Semantic Sensor Node + Node( + package='semantic_sensor', + executable='pointcloud_node.py', + name='front_cam', + arguments=['front_cam'], + parameters=[PathJoinSubstitution([ + semantic_sensor_dir, + 'config', + 'sensor_parameter.yaml' + ])], + output='screen' + ), + + # Elevation Mapping Node + Node( + package='elevation_mapping_cupy', + executable='elevation_mapping_node', + name='elevation_mapping', + parameters=[ + PathJoinSubstitution([ + elevation_mapping_cupy_dir, + 'config', + 'core', + 'core_param.yaml' + ]), + PathJoinSubstitution([ + elevation_mapping_cupy_dir, + 'config', + 'setups', + 'turtle_bot', + 'turtle_bot_semantics_pointcloud.yaml' + ]) + ], + output='screen' + ) + ]) \ No newline at end of file diff --git a/elevation_mapping_cupy/package.xml b/elevation_mapping_cupy/package.xml new file mode 100644 index 00000000..f6880b52 --- /dev/null +++ b/elevation_mapping_cupy/package.xml @@ -0,0 +1,79 @@ + + + elevation_mapping_cupy + 2.0.0 + Elevation mapping on GPU + Takahiro Miki + MIT + + + ament_cmake + ament_cmake_python + + + + + + rclpy + + rclcpp + std_msgs + std_srvs + sensor_msgs + geometry_msgs + + point_cloud_transport + message_filters + elevation_map_msgs + grid_map_msgs + grid_map_ros + image_transport + pcl_ros + pybind11-dev + ros2_numpy + python3-scipy + + numpy_lessthan_2 + tf2_ros + cv_bridge + ament_index_python + tf_transformations + + + + rviz2 + + robot_state_publisher + python3-opencv + cupy-cuda12x + simple-parsing + python3-shapely + python3-ruamel.yaml + boost + python3-transforms3d + + + gazebo_ros_pkgs + turtlebot3_msgs + turtlebot3_teleop + camera_info_manager + camera_calibration_parsers + + + + + + + + + ament_lint_auto + ament_lint_common + + + + ament_cmake + + + + + \ No newline at end of file diff --git a/elevation_mapping_cupy/resource/elevation_mapping_cupy b/elevation_mapping_cupy/resource/elevation_mapping_cupy new file mode 100644 index 00000000..e69de29b diff --git a/elevation_mapping_cupy/rviz/turtle_example.rviz b/elevation_mapping_cupy/rviz/turtle_example.rviz new file mode 100644 index 00000000..0cd124c8 --- /dev/null +++ b/elevation_mapping_cupy/rviz/turtle_example.rviz @@ -0,0 +1,286 @@ +Panels: + - Class: rviz/Displays + Help Height: 70 + Name: Displays + Property Tree Widget: + Expanded: + - 
/Global Options1 + - /ElevationMapRaw1 + - /Feature1 + Splitter Ratio: 0.5768463015556335 + Tree Height: 234 + - Class: rviz/Selection + Name: Selection + - Class: rviz/Tool Properties + Expanded: + - /2D Pose Estimate1 + - /2D Nav Goal1 + - /Publish Point1 + Name: Tool Properties + Splitter Ratio: 0.5886790156364441 + - Class: rviz/Views + Expanded: + - /Current View1 + Name: Views + Splitter Ratio: 0.5 + - Class: rviz/Time + Name: Time + SyncMode: 0 + SyncSource: Color +Preferences: + PromptSaveOnExit: true +Toolbars: + toolButtonStyle: 2 +Visualization Manager: + Class: "" + Displays: + - Alpha: 0.5 + Cell Size: 1 + Class: rviz/Grid + Color: 160; 160; 164 + Enabled: true + Line Style: + Line Width: 0.029999999329447746 + Value: Lines + Name: Grid + Normal Cell Count: 0 + Offset: + X: 0 + Y: 0 + Z: 0 + Plane: XY + Plane Cell Count: 10 + Reference Frame: + Value: true + - Alpha: 1 + Autocompute Intensity Bounds: false + Class: grid_map_rviz_plugin/GridMap + Color: 200; 200; 200 + Color Layer: rgb + Color Transformer: ColorLayer + Enabled: true + Height Layer: elevation + Height Transformer: "" + History Length: 1 + Invert Rainbow: false + Max Color: 238; 238; 236 + Max Intensity: 1 + Min Color: 32; 74; 135 + Min Intensity: 0 + Name: ElevationMapRaw + Show Grid Lines: true + Topic: /elevation_mapping/elevation_map_raw + Unreliable: false + Use Rainbow: false + Value: true + - Alpha: 1 + Class: rviz/RobotModel + Collision Enabled: false + Enabled: true + Links: + All Links Enabled: true + Expand Joint Details: false + Expand Link Details: false + Expand Tree: false + Link Tree Style: Links in Alphabetic Order + base_footprint: + Alpha: 1 + Show Axes: false + Show Trail: false + base_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + base_scan: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + camera_depth_frame: + Alpha: 1 + Show Axes: false + Show Trail: false + camera_depth_optical_frame: + Alpha: 1 + Show Axes: false + Show Trail: false + camera_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + camera_rgb_frame: + Alpha: 1 + Show Axes: false + Show Trail: false + camera_rgb_optical_frame: + Alpha: 1 + Show Axes: false + Show Trail: false + caster_back_left_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + caster_back_right_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + imu_link: + Alpha: 1 + Show Axes: false + Show Trail: false + wheel_left_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + wheel_right_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + Name: RobotModel + Robot Description: robot_description + TF Prefix: "" + Update Interval: 0 + Value: true + Visual Enabled: true + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz/PointCloud2 + Color: 255; 255; 255 + Color Transformer: RGB8 + Decay Time: 0 + Enabled: false + Invert Rainbow: false + Max Color: 255; 255; 255 + Min Color: 0; 0; 0 + Name: PointCloud2 + Position Transformer: XYZ + Queue Size: 10 + Selectable: true + Size (Pixels): 3 + Size (m): 0.009999999776482582 + Style: Flat Squares + Topic: /camera/depth/points + Unreliable: false + Use Fixed Frame: true + Use rainbow: true + Value: false + - Class: rviz/Image + Enabled: true + Image Topic: /camera/rgb/image_raw + Max Value: 1 + Median window: 5 + Min Value: 0 + Name: Color + Normalize Range: true + Queue 
Size: 2 + Transport Hint: raw + Unreliable: false + Value: true + - Class: rviz/Image + Enabled: true + Image Topic: /front_cam/semantic_image_debug + Max Value: 1 + Median window: 5 + Min Value: 0 + Name: Segmentation + Normalize Range: true + Queue Size: 2 + Transport Hint: raw + Unreliable: false + Value: true + - Class: rviz/Image + Enabled: true + Image Topic: /front_cam/semantic_seg_feat_im + Max Value: 1 + Median window: 5 + Min Value: 0 + Name: Feature + Normalize Range: true + Queue Size: 2 + Transport Hint: raw + Unreliable: false + Value: true + Enabled: true + Global Options: + Background Color: 255; 255; 255 + Default Light: true + Fixed Frame: odom + Frame Rate: 30 + Name: root + Tools: + - Class: rviz/Interact + Hide Inactive Objects: true + - Class: rviz/MoveCamera + - Class: rviz/Select + - Class: rviz/FocusCamera + - Class: rviz/Measure + - Class: rviz/SetInitialPose + Theta std deviation: 0.2617993950843811 + Topic: /initialpose + X std deviation: 0.5 + Y std deviation: 0.5 + - Class: rviz/SetGoal + Topic: /move_base_simple/goal + - Class: rviz/PublishPoint + Single click: true + Topic: /clicked_point + Value: true + Views: + Current: + Class: rviz/Orbit + Distance: 9.118926048278809 + Enable Stereo Rendering: + Stereo Eye Separation: 0.05999999865889549 + Stereo Focal Distance: 1 + Swap Stereo Eyes: false + Value: false + Field of View: 0.7853981852531433 + Focal Point: + X: 2.046891927719116 + Y: -0.5291382074356079 + Z: -0.3964909017086029 + Focal Shape Fixed Size: true + Focal Shape Size: 0.05000000074505806 + Invert Z Axis: false + Name: Current View + Near Clip Distance: 0.009999999776482582 + Pitch: 0.4647962152957916 + Target Frame: base_footprint + Yaw: 3.113370418548584 + Saved: ~ +Window Geometry: + Color: + collapsed: false + Displays: + collapsed: false + Feature: + collapsed: false + Height: 1306 + Hide Left Dock: false + Hide Right Dock: true + QMainWindow State: 
000000ff00000000fd0000000400000000000001f700000464fc020000000bfb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003b0000016b000000c700fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000a0043006f006c006f007201000001ac000000fd0000001600fffffffb00000018005300650067006d0065006e0074006100740069006f006e01000002af000000e40000001600fffffffb0000000e00460065006100740075007200650100000399000001060000001600ffffff000000010000015f0000053bfc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a0056006900650077007300000000560000053b000000a000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000007800000005afc0100000002fb0000000800540069006d00650100000000000007800000030700fffffffb0000000800540069006d00650100000000000004500000000000000000000005830000046400000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 + Segmentation: + collapsed: false + Selection: + collapsed: false + Time: + collapsed: false + Tool Properties: + collapsed: false + Views: + collapsed: true + Width: 1920 + X: 2280 + Y: 68 diff --git a/elevation_mapping_cupy/rviz/turtle_features_example.rviz b/elevation_mapping_cupy/rviz/turtle_features_example.rviz new file mode 100644 index 00000000..95e38105 --- /dev/null +++ b/elevation_mapping_cupy/rviz/turtle_features_example.rviz @@ -0,0 +1,247 @@ +Panels: + - Class: rviz/Displays + Help Height: 138 + Name: Displays + Property Tree Widget: + Expanded: + - /Global Options1 + - /ElevationMapRaw1 + Splitter Ratio: 0.5768463015556335 + Tree Height: 631 + - Class: rviz/Selection + Name: Selection + - Class: rviz/Tool Properties + Expanded: + - /2D Pose Estimate1 + - /2D Nav Goal1 + - /Publish Point1 + Name: Tool Properties + Splitter Ratio: 0.5886790156364441 + - Class: rviz/Views + Expanded: + - /Current View1 + Name: Views + Splitter Ratio: 0.5 + - Class: rviz/Time + Experimental: false + Name: Time + SyncMode: 0 + SyncSource: "" +Preferences: + PromptSaveOnExit: true +Toolbars: + toolButtonStyle: 2 +Visualization Manager: + Class: "" + Displays: + - Alpha: 0.5 + Cell Size: 1 + Class: rviz/Grid + Color: 160; 160; 164 + Enabled: true + Line Style: + Line Width: 0.029999999329447746 + Value: Lines + Name: Grid + Normal Cell Count: 0 + Offset: + X: 0 + Y: 0 + Z: 0 + Plane: XY + Plane Cell Count: 10 + Reference Frame: + Value: true + - Alpha: 1 + Autocompute Intensity Bounds: false + Class: grid_map_rviz_plugin/GridMap + Color: 200; 200; 200 + Color Layer: pca + Color Transformer: ColorLayer + ColorMap: default + Enabled: true + Grid Cell Decimation: 1 + Grid Line Thickness: 0.10000000149011612 + Height Layer: elevation + Height Transformer: "" + History Length: 1 + Invert ColorMap: false + Max Color: 238; 238; 236 + Max Intensity: 1 + Min 
Color: 32; 74; 135 + Min Intensity: 0 + Name: ElevationMapRaw + Show Grid Lines: true + Topic: /elevation_mapping/elevation_map_raw + Unreliable: false + Use ColorMap: true + Value: true + - Alpha: 1 + Class: rviz/RobotModel + Collision Enabled: false + Enabled: true + Links: + All Links Enabled: true + Expand Joint Details: false + Expand Link Details: false + Expand Tree: false + Link Tree Style: Links in Alphabetic Order + base_footprint: + Alpha: 1 + Show Axes: false + Show Trail: false + base_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + base_scan: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + camera_depth_frame: + Alpha: 1 + Show Axes: false + Show Trail: false + camera_depth_optical_frame: + Alpha: 1 + Show Axes: false + Show Trail: false + camera_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + camera_rgb_frame: + Alpha: 1 + Show Axes: false + Show Trail: false + camera_rgb_optical_frame: + Alpha: 1 + Show Axes: false + Show Trail: false + caster_back_left_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + caster_back_right_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + imu_link: + Alpha: 1 + Show Axes: false + Show Trail: false + wheel_left_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + wheel_right_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + Name: RobotModel + Robot Description: robot_description + TF Prefix: "" + Update Interval: 0 + Value: true + Visual Enabled: true + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz/PointCloud2 + Color: 255; 255; 255 + Color Transformer: RGB8 + Decay Time: 0 + Enabled: false + Invert Rainbow: false + Max Color: 255; 255; 255 + Min Color: 0; 0; 0 + Name: PointCloud2 + Position Transformer: XYZ + Queue Size: 10 + Selectable: true + Size (Pixels): 3 + Size (m): 0.009999999776482582 + Style: Flat Squares + Topic: /camera/depth/points + Unreliable: false + Use Fixed Frame: true + Use rainbow: true + Value: false + Enabled: true + Global Options: + Background Color: 255; 255; 255 + Default Light: true + Fixed Frame: odom + Frame Rate: 30 + Name: root + Tools: + - Class: rviz/Interact + Hide Inactive Objects: true + - Class: rviz/MoveCamera + - Class: rviz/Select + - Class: rviz/FocusCamera + - Class: rviz/Measure + - Class: rviz/SetInitialPose + Theta std deviation: 0.2617993950843811 + Topic: /initialpose + X std deviation: 0.5 + Y std deviation: 0.5 + - Class: rviz/SetGoal + Topic: /move_base_simple/goal + - Class: rviz/PublishPoint + Single click: true + Topic: /clicked_point + Value: true + Views: + Current: + Class: rviz/Orbit + Distance: 9.118926048278809 + Enable Stereo Rendering: + Stereo Eye Separation: 0.05999999865889549 + Stereo Focal Distance: 1 + Swap Stereo Eyes: false + Value: false + Field of View: 0.7853981852531433 + Focal Point: + X: 2.046891927719116 + Y: -0.5291382074356079 + Z: -0.3964909017086029 + Focal Shape Fixed Size: true + Focal Shape Size: 0.05000000074505806 + Invert Z Axis: false + Name: Current View + Near Clip Distance: 0.009999999776482582 + Pitch: 0.4647965431213379 + Target Frame: base_footprint + Yaw: 2.9933695793151855 + Saved: ~ +Window Geometry: + Displays: + collapsed: false + Height: 1016 + Hide Left Dock: false + Hide Right Dock: true + QMainWindow State: 
000000ff00000000fd0000000400000000000001f70000033efc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d0000033e000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261000000010000015f0000053bfc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a0056006900650077007300000000560000053b000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000007380000005afc0100000002fb0000000800540069006d0065010000000000000738000002eb00fffffffb0000000800540069006d006501000000000000045000000000000000000000053b0000033e00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 + Selection: + collapsed: false + Time: + collapsed: false + Tool Properties: + collapsed: false + Views: + collapsed: true + Width: 1848 + X: 72 + Y: 27 diff --git a/elevation_mapping_cupy/rviz/turtle_segmentation_example.rviz b/elevation_mapping_cupy/rviz/turtle_segmentation_example.rviz new file mode 100644 index 00000000..df44f1b6 --- /dev/null +++ b/elevation_mapping_cupy/rviz/turtle_segmentation_example.rviz @@ -0,0 +1,304 @@ +Panels: + - Class: rviz/Displays + Help Height: 138 + Name: Displays + Property Tree Widget: + Expanded: + - /Global Options1 + - /ElevationMapRaw1 + - /ElevationMapFilter1 + Splitter Ratio: 0.5768463015556335 + Tree Height: 631 + - Class: rviz/Selection + Name: Selection + - Class: rviz/Tool Properties + Expanded: + - /2D Pose Estimate1 + - /2D Nav Goal1 + - /Publish Point1 + Name: Tool Properties + Splitter Ratio: 0.5886790156364441 + - Class: rviz/Views + Expanded: + - /Current View1 + Name: Views + Splitter Ratio: 0.5 + - Class: rviz/Time + Experimental: false + Name: Time + SyncMode: 0 + SyncSource: "" +Preferences: + PromptSaveOnExit: true +Toolbars: + toolButtonStyle: 2 +Visualization Manager: + Class: "" + Displays: + - Alpha: 0.5 + Cell Size: 1 + Class: rviz/Grid + Color: 160; 160; 164 + Enabled: true + Line Style: + Line Width: 0.029999999329447746 + Value: Lines + Name: Grid + Normal Cell Count: 0 + Offset: + X: 0 + Y: 0 + Z: 0 + Plane: XY + Plane Cell Count: 10 + Reference Frame: + Value: true + - Alpha: 1 + Autocompute Intensity Bounds: false + Class: grid_map_rviz_plugin/GridMap + Color: 200; 200; 200 + Color Layer: rgb + Color Transformer: ColorLayer + ColorMap: default + Enabled: true + Grid Cell Decimation: 1 + Grid Line Thickness: 0.10000000149011612 + Height Layer: elevation + Height Transformer: "" + History Length: 1 + Invert ColorMap: false + Max Color: 238; 238; 236 + Max Intensity: 1 + Min Color: 32; 74; 135 + Min Intensity: 0 + Name: ElevationMapRaw + Show Grid Lines: true + Topic: /elevation_mapping/elevation_map_raw + Unreliable: false + Use ColorMap: true + Value: true + - Alpha: 0.10000000149011612 + Autocompute 
Intensity Bounds: true + Class: grid_map_rviz_plugin/GridMap + Color: 200; 200; 200 + Color Layer: min_filter + Color Transformer: GridMapLayer + ColorMap: default + Enabled: true + Grid Cell Decimation: 1 + Grid Line Thickness: 0.10000000149011612 + Height Layer: min_filter + Height Transformer: GridMapLayer + History Length: 1 + Invert ColorMap: false + Max Color: 206; 92; 0 + Max Intensity: 10 + Min Color: 255; 255; 255 + Min Intensity: 0 + Name: ElevationMapFilter + Show Grid Lines: true + Topic: /elevation_mapping/elevation_map_filter + Unreliable: false + Use ColorMap: true + Value: true + - Alpha: 1 + Class: rviz/RobotModel + Collision Enabled: false + Enabled: true + Links: + All Links Enabled: true + Expand Joint Details: false + Expand Link Details: false + Expand Tree: false + Link Tree Style: Links in Alphabetic Order + base_footprint: + Alpha: 1 + Show Axes: false + Show Trail: false + base_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + base_scan: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + camera_depth_frame: + Alpha: 1 + Show Axes: false + Show Trail: false + camera_depth_optical_frame: + Alpha: 1 + Show Axes: false + Show Trail: false + camera_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + camera_rgb_frame: + Alpha: 1 + Show Axes: false + Show Trail: false + camera_rgb_optical_frame: + Alpha: 1 + Show Axes: false + Show Trail: false + caster_back_left_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + caster_back_right_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + imu_link: + Alpha: 1 + Show Axes: false + Show Trail: false + wheel_left_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + wheel_right_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + Name: RobotModel + Robot Description: robot_description + TF Prefix: "" + Update Interval: 0 + Value: true + Visual Enabled: true + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz/PointCloud2 + Color: 255; 255; 255 + Color Transformer: RGB8 + Decay Time: 0 + Enabled: false + Invert Rainbow: false + Max Color: 255; 255; 255 + Min Color: 0; 0; 0 + Name: PointCloud2 + Position Transformer: XYZ + Queue Size: 10 + Selectable: true + Size (Pixels): 3 + Size (m): 0.009999999776482582 + Style: Flat Squares + Topic: /camera/depth/points + Unreliable: false + Use Fixed Frame: true + Use rainbow: true + Value: false + - Class: rviz/MarkerArray + Enabled: true + Marker Topic: /convex_plane_decomposition_ros/boundaries + Name: PlaneBoundaries + Namespaces: + {} + Queue Size: 100 + Value: true + - Alpha: 1 + Autocompute Intensity Bounds: true + Class: grid_map_rviz_plugin/GridMap + Color: 200; 200; 200 + Color Layer: segmentation + Color Transformer: GridMapLayer + ColorMap: default + Enabled: true + Grid Cell Decimation: 1 + Grid Line Thickness: 0.10000000149011612 + Height Layer: elevation_before_postprocess + Height Transformer: GridMapLayer + History Length: 1 + Invert ColorMap: false + Max Color: 238; 238; 236 + Max Intensity: 10 + Min Color: 32; 74; 135 + Min Intensity: 0 + Name: PlaneSegmentation + Show Grid Lines: true + Topic: /convex_plane_decomposition_ros/filtered_map + Unreliable: false + Use ColorMap: true + Value: true + Enabled: true + Global Options: + Background Color: 255; 255; 255 + Default Light: true + Fixed Frame: odom + Frame Rate: 30 + 
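# [Editor's note, not part of the original config] The GridMap displays above
# all follow the same recipe: a Topic to subscribe to, a Height Layer that
# drives the 3D geometry, and a Color Transformer/Color Layer pair for the
# texture. A minimal hand-written entry, assuming the raw map topic and its
# 'elevation' and 'rgb' layers exist:
#
#   - Class: grid_map_rviz_plugin/GridMap
#     Topic: /elevation_mapping/elevation_map_raw
#     Height Layer: elevation
#     Color Transformer: ColorLayer
#     Color Layer: rgb
#     Value: true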
Name: root + Tools: + - Class: rviz/Interact + Hide Inactive Objects: true + - Class: rviz/MoveCamera + - Class: rviz/Select + - Class: rviz/FocusCamera + - Class: rviz/Measure + - Class: rviz/SetInitialPose + Theta std deviation: 0.2617993950843811 + Topic: /initialpose + X std deviation: 0.5 + Y std deviation: 0.5 + - Class: rviz/SetGoal + Topic: /move_base_simple/goal + - Class: rviz/PublishPoint + Single click: true + Topic: /clicked_point + Value: true + Views: + Current: + Class: rviz/Orbit + Distance: 6.288029193878174 + Enable Stereo Rendering: + Stereo Eye Separation: 0.05999999865889549 + Stereo Focal Distance: 1 + Swap Stereo Eyes: false + Value: false + Field of View: 0.7853981852531433 + Focal Point: + X: 2.046891927719116 + Y: -0.5291382074356079 + Z: -0.3964909017086029 + Focal Shape Fixed Size: true + Focal Shape Size: 0.05000000074505806 + Invert Z Axis: false + Name: Current View + Near Clip Distance: 0.009999999776482582 + Pitch: 0.5447964668273926 + Target Frame: base_footprint + Yaw: 3.108370780944824 + Saved: ~ +Window Geometry: + Displays: + collapsed: false + Height: 1016 + Hide Left Dock: false + Hide Right Dock: true + QMainWindow State: 000000ff00000000fd0000000400000000000001f70000033efc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d0000033e000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261000000010000015f0000053bfc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a0056006900650077007300000000560000053b000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000007380000005afc0100000002fb0000000800540069006d0065010000000000000738000002eb00fffffffb0000000800540069006d006501000000000000045000000000000000000000053b0000033e00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 + Selection: + collapsed: false + Time: + collapsed: false + Tool Properties: + collapsed: false + Views: + collapsed: true + Width: 1848 + X: 72 + Y: 27 diff --git a/elevation_mapping_cupy/rviz/turtle_semantic_example.rviz b/elevation_mapping_cupy/rviz/turtle_semantic_example.rviz new file mode 100644 index 00000000..b5b19b90 --- /dev/null +++ b/elevation_mapping_cupy/rviz/turtle_semantic_example.rviz @@ -0,0 +1,246 @@ +Panels: + - Class: rviz/Displays + Help Height: 138 + Name: Displays + Property Tree Widget: + Expanded: + - /Global Options1 + Splitter Ratio: 0.5768463015556335 + Tree Height: 658 + - Class: rviz/Selection + Name: Selection + - Class: rviz/Tool Properties + Expanded: + - /2D Pose Estimate1 + - /2D Nav Goal1 + - /Publish Point1 + Name: Tool Properties + Splitter Ratio: 0.5886790156364441 + - Class: rviz/Views + Expanded: + - /Current View1 + Name: Views + Splitter Ratio: 0.5 + - Class: 
rviz/Time + Experimental: false + Name: Time + SyncMode: 0 + SyncSource: "" +Preferences: + PromptSaveOnExit: true +Toolbars: + toolButtonStyle: 2 +Visualization Manager: + Class: "" + Displays: + - Alpha: 0.5 + Cell Size: 1 + Class: rviz/Grid + Color: 160; 160; 164 + Enabled: true + Line Style: + Line Width: 0.029999999329447746 + Value: Lines + Name: Grid + Normal Cell Count: 0 + Offset: + X: 0 + Y: 0 + Z: 0 + Plane: XY + Plane Cell Count: 10 + Reference Frame: + Value: true + - Alpha: 1 + Autocompute Intensity Bounds: false + Class: grid_map_rviz_plugin/GridMap + Color: 200; 200; 200 + Color Layer: rgb + Color Transformer: ColorLayer + ColorMap: default + Enabled: true + Grid Cell Decimation: 1 + Grid Line Thickness: 0.10000000149011612 + Height Layer: elevation + Height Transformer: "" + History Length: 1 + Invert ColorMap: false + Max Color: 238; 238; 236 + Max Intensity: 1 + Min Color: 32; 74; 135 + Min Intensity: 0 + Name: ElevationMapRaw + Show Grid Lines: true + Topic: /elevation_mapping/elevation_map_raw + Unreliable: false + Use ColorMap: true + Value: true + - Alpha: 1 + Class: rviz/RobotModel + Collision Enabled: false + Enabled: true + Links: + All Links Enabled: true + Expand Joint Details: false + Expand Link Details: false + Expand Tree: false + Link Tree Style: Links in Alphabetic Order + base_footprint: + Alpha: 1 + Show Axes: false + Show Trail: false + base_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + base_scan: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + camera_depth_frame: + Alpha: 1 + Show Axes: false + Show Trail: false + camera_depth_optical_frame: + Alpha: 1 + Show Axes: false + Show Trail: false + camera_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + camera_rgb_frame: + Alpha: 1 + Show Axes: false + Show Trail: false + camera_rgb_optical_frame: + Alpha: 1 + Show Axes: false + Show Trail: false + caster_back_left_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + caster_back_right_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + imu_link: + Alpha: 1 + Show Axes: false + Show Trail: false + wheel_left_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + wheel_right_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + Name: RobotModel + Robot Description: robot_description + TF Prefix: "" + Update Interval: 0 + Value: true + Visual Enabled: true + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz/PointCloud2 + Color: 255; 255; 255 + Color Transformer: RGB8 + Decay Time: 0 + Enabled: false + Invert Rainbow: false + Max Color: 255; 255; 255 + Min Color: 0; 0; 0 + Name: PointCloud2 + Position Transformer: XYZ + Queue Size: 10 + Selectable: true + Size (Pixels): 3 + Size (m): 0.009999999776482582 + Style: Flat Squares + Topic: /camera/depth/points + Unreliable: false + Use Fixed Frame: true + Use rainbow: true + Value: false + Enabled: true + Global Options: + Background Color: 255; 255; 255 + Default Light: true + Fixed Frame: odom + Frame Rate: 30 + Name: root + Tools: + - Class: rviz/Interact + Hide Inactive Objects: true + - Class: rviz/MoveCamera + - Class: rviz/Select + - Class: rviz/FocusCamera + - Class: rviz/Measure + - Class: rviz/SetInitialPose + Theta std deviation: 0.2617993950843811 + Topic: /initialpose + X std deviation: 0.5 + Y std deviation: 0.5 + - Class: rviz/SetGoal + Topic: 
/move_base_simple/goal + - Class: rviz/PublishPoint + Single click: true + Topic: /clicked_point + Value: true + Views: + Current: + Class: rviz/Orbit + Distance: 9.118926048278809 + Enable Stereo Rendering: + Stereo Eye Separation: 0.05999999865889549 + Stereo Focal Distance: 1 + Swap Stereo Eyes: false + Value: false + Field of View: 0.7853981852531433 + Focal Point: + X: 2.046891927719116 + Y: -0.5291382074356079 + Z: -0.3964909017086029 + Focal Shape Fixed Size: true + Focal Shape Size: 0.05000000074505806 + Invert Z Axis: false + Name: Current View + Near Clip Distance: 0.009999999776482582 + Pitch: 0.5947964191436768 + Target Frame: base_footprint + Yaw: 3.0983715057373047 + Saved: ~ +Window Geometry: + Displays: + collapsed: false + Height: 1043 + Hide Left Dock: false + Hide Right Dock: true + QMainWindow State: 000000ff00000000fd0000000400000000000001f700000359fc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d00000359000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261000000010000015f0000053bfc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a0056006900650077007300000000560000053b000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000007800000005afc0100000002fb0000000800540069006d0065010000000000000780000002eb00fffffffb0000000800540069006d00650100000000000004500000000000000000000005830000035900000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 + Selection: + collapsed: false + Time: + collapsed: false + Tool Properties: + collapsed: false + Views: + collapsed: true + Width: 1920 + X: 1920 + Y: 0 diff --git a/elevation_mapping_cupy/rviz/turtle_sim_laser.rviz b/elevation_mapping_cupy/rviz/turtle_sim_laser.rviz new file mode 100644 index 00000000..8f247cfd --- /dev/null +++ b/elevation_mapping_cupy/rviz/turtle_sim_laser.rviz @@ -0,0 +1,423 @@ +Panels: + - Class: rviz_common/Displays + Help Height: 78 + Name: Displays + Property Tree Widget: + Expanded: + - /Global Options1 + - /Status1 + Splitter Ratio: 0.5 + Tree Height: 336 + - Class: rviz_common/Selection + Name: Selection + - Class: rviz_common/Tool Properties + Expanded: + - /2D Goal Pose1 + - /Publish Point1 + Name: Tool Properties + Splitter Ratio: 0.5886790156364441 + - Class: rviz_common/Views + Expanded: + - /Current View1 + - /Current View1/Focal Point1 + Name: Views + Splitter Ratio: 0.5 + - Class: rviz_common/Time + Experimental: false + Name: Time + SyncMode: 0 + SyncSource: PointCloud2 +Visualization Manager: + Class: "" + Displays: + - Alpha: 0.5 + Cell Size: 1 + Class: rviz_default_plugins/Grid + Color: 160; 160; 164 + Enabled: true + Line Style: + Line Width: 0.029999999329447746 + Value: Lines + Name: Grid + Normal Cell Count: 0 
+ Offset: + X: 0 + Y: 0 + Z: 0 + Plane: XY + Plane Cell Count: 10 + Reference Frame: + Value: true + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz_default_plugins/PointCloud2 + Color: 255; 255; 255 + Color Transformer: RGB8 + Decay Time: 0 + Enabled: true + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 4096 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: PointCloud2 + Position Transformer: XYZ + Selectable: true + Size (Pixels): 3 + Size (m): 0.009999999776482582 + Style: Points + Topic: + Depth: 5 + Durability Policy: Volatile + Filter size: 10 + History Policy: Keep Last + Reliability Policy: Reliable + Value: /intel_realsense_r200_depth/points + Use Fixed Frame: true + Use rainbow: true + Value: true + - Alpha: 1 + Class: rviz_default_plugins/RobotModel + Collision Enabled: false + Description File: "" + Description Source: Topic + Description Topic: + Depth: 5 + Durability Policy: Volatile + History Policy: Keep Last + Reliability Policy: Reliable + Value: /robot_description + Enabled: true + Links: + All Links Enabled: true + Expand Joint Details: false + Expand Link Details: false + Expand Tree: false + Link Tree Style: Links in Alphabetic Order + base_footprint: + Alpha: 1 + Show Axes: false + Show Trail: false + base_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + base_scan: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + camera_depth_optical_frame: + Alpha: 1 + Show Axes: false + Show Trail: false + camera_rgb_frame: + Alpha: 1 + Show Axes: false + Show Trail: false + camera_rgb_optical_frame: + Alpha: 1 + Show Axes: false + Show Trail: false + caster_back_left_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + caster_back_right_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + imu_link: + Alpha: 1 + Show Axes: false + Show Trail: false + realsense_depth_frame: + Alpha: 1 + Show Axes: false + Show Trail: false + realsense_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + wheel_left_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + wheel_right_link: + Alpha: 1 + Show Axes: false + Show Trail: false + Value: true + Mass Properties: + Inertia: false + Mass: false + Name: RobotModel + TF Prefix: "" + Update Interval: 0 + Value: true + Visual Enabled: true + - Class: rviz_default_plugins/TF + Enabled: true + Frame Timeout: 15 + Frames: + All Enabled: true + base_footprint: + Value: true + base_link: + Value: true + base_scan: + Value: true + camera_depth_optical_frame: + Value: true + camera_rgb_frame: + Value: true + camera_rgb_optical_frame: + Value: true + caster_back_left_link: + Value: true + caster_back_right_link: + Value: true + imu_link: + Value: true + odom: + Value: true + realsense_depth_frame: + Value: true + realsense_link: + Value: true + wheel_left_link: + Value: true + wheel_right_link: + Value: true + Marker Scale: 1 + Name: TF + Show Arrows: true + Show Axes: true + Show Names: false + Tree: + odom: + base_footprint: + base_link: + base_scan: + {} + caster_back_left_link: + {} + caster_back_right_link: + {} + imu_link: + {} + realsense_link: + camera_rgb_frame: + camera_rgb_optical_frame: + {} + realsense_depth_frame: + camera_depth_optical_frame: + {} + wheel_left_link: + {} + wheel_right_link: + {} + Update Interval: 0 + Value: true + - Class: rviz_default_plugins/Camera + Enabled: false + Far 
Plane Distance: 100 + Image Rendering: background and overlay + Name: Camera + Overlay Alpha: 0.5 + Topic: + Depth: 5 + Durability Policy: Volatile + History Policy: Keep Last + Reliability Policy: Reliable + Value: /intel_realsense_r200_rgb/image_raw + Value: false + Visibility: + Grid: true + GridMap: true + GridMapFiltered: true + Image: true + PointCloud2: true + RobotModel: true + TF: true + Value: true + Zoom Factor: 1 + - Class: rviz_default_plugins/Image + Enabled: true + Max Value: 1 + Median window: 5 + Min Value: 0 + Name: Image + Normalize Range: true + Topic: + Depth: 5 + Durability Policy: Volatile + History Policy: Keep Last + Reliability Policy: Reliable + Value: /intel_realsense_r200_depth/image_raw + Value: true + - Class: rviz_default_plugins/Image + Enabled: true + Max Value: 1 + Median window: 5 + Min Value: 0 + Name: Image + Normalize Range: true + Topic: + Depth: 5 + Durability Policy: Volatile + History Policy: Keep Last + Reliability Policy: Reliable + Value: /intel_realsense_r200_rgb/image_raw + Value: true + - Alpha: 1 + Autocompute Intensity Bounds: true + Class: grid_map_rviz_plugin/GridMap + Color: 200; 200; 200 + Color Layer: rgb + Color Transformer: ColorLayer + Enabled: true + Height Layer: elevation + Height Transformer: GridMapLayer + History Length: 1 + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 10 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: GridMap + Show Grid Lines: true + Topic: + Depth: 5 + Durability Policy: Volatile + Filter size: 10 + History Policy: Keep Last + Reliability Policy: Reliable + Value: /elevation_mapping_node/elevation_map_raw + Use Rainbow: true + Value: true + - Alpha: 1 + Autocompute Intensity Bounds: true + Class: grid_map_rviz_plugin/GridMap + Color: 200; 200; 200 + Color Layer: elevation + Color Transformer: IntensityLayer + Enabled: false + Height Layer: smooth + Height Transformer: GridMapLayer + History Length: 1 + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 10 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: GridMapFiltered + Show Grid Lines: true + Topic: + Depth: 5 + Durability Policy: Volatile + Filter size: 10 + History Policy: Keep Last + Reliability Policy: Reliable + Value: /elevation_mapping_node/elevation_map_filter + Use Rainbow: true + Value: false + Enabled: true + Global Options: + Background Color: 48; 48; 48 + Fixed Frame: odom + Frame Rate: 30 + Name: root + Tools: + - Class: rviz_default_plugins/Interact + Hide Inactive Objects: true + - Class: rviz_default_plugins/MoveCamera + - Class: rviz_default_plugins/Select + - Class: rviz_default_plugins/FocusCamera + - Class: rviz_default_plugins/Measure + Line color: 128; 128; 0 + - Class: rviz_default_plugins/SetInitialPose + Covariance x: 0.25 + Covariance y: 0.25 + Covariance yaw: 0.06853891909122467 + Topic: + Depth: 5 + Durability Policy: Volatile + History Policy: Keep Last + Reliability Policy: Reliable + Value: /initialpose + - Class: rviz_default_plugins/SetGoal + Topic: + Depth: 5 + Durability Policy: Volatile + History Policy: Keep Last + Reliability Policy: Reliable + Value: /goal_pose + - Class: rviz_default_plugins/PublishPoint + Single click: true + Topic: + Depth: 5 + Durability Policy: Volatile + History Policy: Keep Last + Reliability Policy: Reliable + Value: /clicked_point + Transformation: + Current: + Class: rviz_default_plugins/TF + Value: true + Views: + Current: + Class: rviz_default_plugins/ThirdPersonFollower + Distance: 3.38887095451355 + Enable Stereo Rendering: + Stereo Eye Separation: 
0.05999999865889549 + Stereo Focal Distance: 1 + Swap Stereo Eyes: false + Value: false + Focal Point: + X: -0.7572358250617981 + Y: -0.10913658142089844 + Z: -0.018636569380760193 + Focal Shape Fixed Size: true + Focal Shape Size: 0.05000000074505806 + Invert Z Axis: false + Name: Current View + Near Clip Distance: 0.009999999776482582 + Pitch: 0.6897954940795898 + Target Frame: base_link + Value: ThirdPersonFollower (rviz_default_plugins) + Yaw: 3.08858585357666 + Saved: ~ +Window Geometry: + Camera: + collapsed: false + Displays: + collapsed: false + Height: 954 + Hide Left Dock: false + Hide Right Dock: false + Image: + collapsed: false + QMainWindow State: 000000ff00000000fd00000004000000000000025f00000320fc020000000bfb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003b000001d9000000c700fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000c00430061006d0065007200610000000136000000f70000002800fffffffb0000000a0049006d0061006700650000000157000000d60000002800fffffffb0000000a0049006d006100670065010000021a000001410000002800ffffff000000010000010f00000320fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073010000003b00000320000000a000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000007800000003efc0100000002fb0000000800540069006d00650100000000000007800000025300fffffffb0000000800540069006d00650100000000000004500000000000000000000004060000032000000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 + Selection: + collapsed: false + Time: + collapsed: false + Tool Properties: + collapsed: false + Views: + collapsed: false + Width: 1920 + X: 1920 + Y: 27 diff --git a/elevation_mapping_cupy/scripts/elevation_mapping_node.py b/elevation_mapping_cupy/scripts/elevation_mapping_node.py new file mode 100755 index 00000000..5c599d9a --- /dev/null +++ b/elevation_mapping_cupy/scripts/elevation_mapping_node.py @@ -0,0 +1,495 @@ +#!/usr/bin/env python3 +import numpy as np +import os +from functools import partial + +import rclpy +from rclpy.node import Node +from rclpy.qos import QoSPresetProfiles +from ament_index_python.packages import get_package_share_directory +import ros2_numpy as rnp +from sensor_msgs.msg import PointCloud2, Image, CameraInfo +from sensor_msgs_py import point_cloud2 +from tf_transformations import quaternion_matrix +import tf2_ros +import message_filters +from cv_bridge import CvBridge +from rclpy.duration import Duration +from grid_map_msgs.msg import GridMap +from std_msgs.msg import Float32MultiArray +from std_msgs.msg import MultiArrayLayout as MAL +from std_msgs.msg import MultiArrayDimension as MAD +from rclpy.serialization import serialize_message +from elevation_mapping_cupy import ElevationMap, 
Parameter + +PDC_DATATYPE = { + "1": np.int8, + "2": np.uint8, + "3": np.int16, + "4": np.uint16, + "5": np.int32, + "6": np.uint32, + "7": np.float32, + "8": np.float64, +} + +class ElevationMappingNode(Node): + def __init__(self): + super().__init__( + 'elevation_mapping_node', + automatically_declare_parameters_from_overrides=True, + allow_undeclared_parameters=True + ) + + self.root = get_package_share_directory("elevation_mapping_cupy") + weight_file = os.path.join(self.root, "config/core/weights.dat") + plugin_config_file = os.path.join(self.root, "config/core/plugin_config.yaml") + + # Initialize parameters with some defaults + self.param = Parameter( + use_chainer=False, + weight_file=weight_file, + plugin_config_file=plugin_config_file + ) + + # Read ROS parameters (including YAML) + self.initialize_ros() + self.set_param_values_from_ros() + + # Overwrite subscriber_cfg from loaded YAML + self.param.subscriber_cfg = self.my_subscribers + + self.initialize_elevation_mapping() + self.register_subscribers() + self.register_publishers() + self.register_timers() + self._last_t = None + + def initialize_elevation_mapping(self) -> None: + self.param.update() + self._pointcloud_process_counter = 0 + self._image_process_counter = 0 + self._map = ElevationMap(self.param) + self._map_data = np.zeros( + (self._map.cell_n - 2, self._map.cell_n - 2), dtype=np.float32 + ) + self.get_logger().info(f"Initialized map with length: {self._map.map_length}, resolution: {self._map.resolution}, cells: {self._map.cell_n}") + + self._map_q = None + self._map_t = None + + def initialize_ros(self) -> None: + self._tf_buffer = tf2_ros.Buffer() + self._listener = tf2_ros.TransformListener(self._tf_buffer, self) + self.get_ros_params() + + def get_ros_params(self) -> None: + self.use_chainer = self.get_parameter('use_chainer').get_parameter_value().bool_value + self.weight_file = self.get_parameter('weight_file').get_parameter_value().string_value + self.plugin_config_file = self.get_parameter('plugin_config_file').get_parameter_value().string_value + self.initialize_frame_id = self.get_parameter('initialize_frame_id').get_parameter_value().string_value + self.initialize_tf_offset = self.get_parameter('initialize_tf_offset').get_parameter_value().double_array_value + self.map_frame = self.get_parameter('map_frame').get_parameter_value().string_value + self.base_frame = self.get_parameter('base_frame').get_parameter_value().string_value + self.corrected_map_frame = self.get_parameter('corrected_map_frame').get_parameter_value().string_value + self.initialize_method = self.get_parameter('initialize_method').get_parameter_value().string_value + self.position_lowpass_alpha = self.get_parameter('position_lowpass_alpha').get_parameter_value().double_value + self.orientation_lowpass_alpha = self.get_parameter('orientation_lowpass_alpha').get_parameter_value().double_value + self.recordable_fps = self.get_parameter('recordable_fps').get_parameter_value().double_value + self.update_variance_fps = self.get_parameter('update_variance_fps').get_parameter_value().double_value + self.time_interval = self.get_parameter('time_interval').get_parameter_value().double_value + self.update_pose_fps = self.get_parameter('update_pose_fps').get_parameter_value().double_value + self.initialize_tf_grid_size = self.get_parameter('initialize_tf_grid_size').get_parameter_value().double_value + self.map_acquire_fps = self.get_parameter('map_acquire_fps').get_parameter_value().double_value + self.publish_statistics_fps = 
self.get_parameter('publish_statistics_fps').get_parameter_value().double_value + self.enable_pointcloud_publishing = self.get_parameter('enable_pointcloud_publishing').get_parameter_value().bool_value + self.enable_normal_arrow_publishing = self.get_parameter('enable_normal_arrow_publishing').get_parameter_value().bool_value + self.enable_drift_corrected_TF_publishing = self.get_parameter('enable_drift_corrected_TF_publishing').get_parameter_value().bool_value + self.use_initializer_at_start = self.get_parameter('use_initializer_at_start').get_parameter_value().bool_value + subscribers_params = self.get_parameters_by_prefix('subscribers') + self.my_subscribers = {} + for param_name, param_value in subscribers_params.items(): + parts = param_name.split('.') + if len(parts) >= 2: + sub_key, sub_param = parts[:2] + if sub_key not in self.my_subscribers: + self.my_subscribers[sub_key] = {} + self.my_subscribers[sub_key][sub_param] = param_value.value + publishers_params = self.get_parameters_by_prefix('publishers') + self.my_publishers = {} + for param_name, param_value in publishers_params.items(): + parts = param_name.split('.') + if len(parts) >= 2: + pub_key, pub_param = parts[:2] + if pub_key not in self.my_publishers: + self.my_publishers[pub_key] = {} + self.my_publishers[pub_key][pub_param] = param_value.value + + + def set_param_values_from_ros(self): + # Assign to self.param so it won't use defaults + # Use try/except so missing params won't cause errors + try: self.param.resolution = self.get_parameter('resolution').get_parameter_value().double_value + except: pass + try: self.param.map_length = self.get_parameter('map_length').get_parameter_value().double_value + except: pass + try: self.param.sensor_noise_factor = self.get_parameter('sensor_noise_factor').get_parameter_value().double_value + except: pass + try: self.param.mahalanobis_thresh = self.get_parameter('mahalanobis_thresh').get_parameter_value().double_value + except: pass + try: self.param.outlier_variance = self.get_parameter('outlier_variance').get_parameter_value().double_value + except: pass + try: + # The YAML has 'drift_compensation_variance_inler', but our param is 'drift_compensation_variance_inlier' + self.param.drift_compensation_variance_inlier = self.get_parameter('drift_compensation_variance_inler').get_parameter_value().double_value + except: pass + try: self.param.max_drift = self.get_parameter('max_drift').get_parameter_value().double_value + except: pass + try: self.param.drift_compensation_alpha = self.get_parameter('drift_compensation_alpha').get_parameter_value().double_value + except: pass + try: self.param.time_variance = self.get_parameter('time_variance').get_parameter_value().double_value + except: pass + try: self.param.max_variance = self.get_parameter('max_variance').get_parameter_value().double_value + except: pass + try: self.param.initial_variance = self.get_parameter('initial_variance').get_parameter_value().double_value + except: pass + try: self.param.traversability_inlier = self.get_parameter('traversability_inlier').get_parameter_value().double_value + except: pass + try: self.param.dilation_size = self.get_parameter('dilation_size').get_parameter_value().integer_value + except: pass + try: self.param.wall_num_thresh = self.get_parameter('wall_num_thresh').get_parameter_value().double_value + except: pass + try: self.param.min_height_drift_cnt = self.get_parameter('min_height_drift_cnt').get_parameter_value().double_value + except: pass + try: self.param.position_noise_thresh = 
self.get_parameter('position_noise_thresh').get_parameter_value().double_value + except: pass + try: self.param.orientation_noise_thresh = self.get_parameter('orientation_noise_thresh').get_parameter_value().double_value + except: pass + try: self.param.min_valid_distance = self.get_parameter('min_valid_distance').get_parameter_value().double_value + except: pass + try: self.param.max_height_range = self.get_parameter('max_height_range').get_parameter_value().double_value + except: pass + try: self.param.ramped_height_range_a = self.get_parameter('ramped_height_range_a').get_parameter_value().double_value + except: pass + try: self.param.ramped_height_range_b = self.get_parameter('ramped_height_range_b').get_parameter_value().double_value + except: pass + try: self.param.ramped_height_range_c = self.get_parameter('ramped_height_range_c').get_parameter_value().double_value + except: pass + try: self.param.max_ray_length = self.get_parameter('max_ray_length').get_parameter_value().double_value + except: pass + try: self.param.cleanup_step = self.get_parameter('cleanup_step').get_parameter_value().double_value + except: pass + try: self.param.cleanup_cos_thresh = self.get_parameter('cleanup_cos_thresh').get_parameter_value().double_value + except: pass + try: self.param.safe_thresh = self.get_parameter('safe_thresh').get_parameter_value().double_value + except: pass + try: self.param.safe_min_thresh = self.get_parameter('safe_min_thresh').get_parameter_value().double_value + except: pass + try: self.param.max_unsafe_n = self.get_parameter('max_unsafe_n').get_parameter_value().integer_value + except: pass + try: self.param.overlap_clear_range_xy = self.get_parameter('overlap_clear_range_xy').get_parameter_value().double_value + except: pass + try: self.param.overlap_clear_range_z = self.get_parameter('overlap_clear_range_z').get_parameter_value().double_value + except: pass + try: self.param.enable_edge_sharpen = self.get_parameter('enable_edge_sharpen').get_parameter_value().bool_value + except: pass + try: self.param.enable_visibility_cleanup = self.get_parameter('enable_visibility_cleanup').get_parameter_value().bool_value + except: pass + try: self.param.enable_drift_compensation = self.get_parameter('enable_drift_compensation').get_parameter_value().bool_value + except: pass + try: self.param.enable_overlap_clearance = self.get_parameter('enable_overlap_clearance').get_parameter_value().bool_value + except: pass + try: self.param.use_only_above_for_upper_bound = self.get_parameter('use_only_above_for_upper_bound').get_parameter_value().bool_value + except: pass + + def register_subscribers(self) -> None: + if any(config.get("data_type") == "image" for config in self.my_subscribers.values()): + self.cv_bridge = CvBridge() + + pointcloud_subs = {} + image_subs = {} + + for key, config in self.my_subscribers.items(): + data_type = config.get("data_type") + if data_type == "image": + topic_name_camera = config.get("topic_name_camera", "/camera/image") + topic_name_camera_info = config.get("topic_name_camera_info", "/camera/camera_info") + camera_sub = message_filters.Subscriber( + self, + Image, + topic_name_camera + ) + camera_info_sub = message_filters.Subscriber( + self, + CameraInfo, + topic_name_camera_info + ) + image_sync = message_filters.ApproximateTimeSynchronizer( + [camera_sub, camera_info_sub], queue_size=10, slop=0.5 + ) + image_sync.registerCallback(partial(self.image_callback, sub_key=key)) + image_subs[key] = image_sync + elif data_type == "pointcloud": + topic_name = 
config.get("topic_name", "/pointcloud") + # qos_profile = rclpy.qos.QoSProfile( + # depth=10, + # reliability=rclpy.qos.ReliabilityPolicy.BEST_EFFORT, + # durability=rclpy.qos.DurabilityPolicy.VOLATILE, + # history=rclpy.qos.HistoryPolicy.KEEP_LAST + # ) + # qos_profile = QoSPresetProfiles.get_from_short_key("sensor_data") + # qos_profile = rclpy.qos.QoSProfile(depth=10) + qos_profile = 10 + subscription = self.create_subscription( + PointCloud2, + topic_name, + partial(self.pointcloud_callback, sub_key=key), + qos_profile + ) + pointcloud_subs[key] = subscription + + def register_publishers(self) -> None: + self._publishers_dict = {} + self._publishers_timers = [] + + for pub_key, pub_config in self.my_publishers.items(): + topic_name = f"/{self.get_name()}/{pub_key}" + publisher = self.create_publisher(GridMap, topic_name, 10) + self._publishers_dict[pub_key] = publisher + + fps = pub_config.get("fps", 1.0) + timer = self.create_timer( + 1.0 / fps, + partial(self.publish_map, key=pub_key) + ) + self._publishers_timers.append(timer) + + def register_timers(self) -> None: + self.time_pose_update = self.create_timer( + 0.1, + self.pose_update + ) + self.timer_variance = self.create_timer( + 1.0 / self.update_variance_fps, + self.update_variance + ) + self.timer_time = self.create_timer( + self.time_interval, + self.update_time + ) + + def publish_map(self, key: str) -> None: + if self._map_q is None: + return + gm = GridMap() + gm.header.frame_id = self.map_frame + gm.header.stamp = self._last_t if self._last_t is not None else self.get_clock().now().to_msg() + gm.info.resolution = self._map.resolution + actual_map_length = (self._map.cell_n - 2) * self._map.resolution + gm.info.length_x = actual_map_length + gm.info.length_y = actual_map_length + gm.info.pose.position.x = self._map_t.x + gm.info.pose.position.y = self._map_t.y + gm.info.pose.position.z = 0.0 + gm.info.pose.orientation.w = 1.0 + gm.info.pose.orientation.x = 0.0 + gm.info.pose.orientation.y = 0.0 + gm.info.pose.orientation.z = 0.0 + gm.layers = [] + gm.basic_layers = self.my_publishers[key]["basic_layers"] + + for layer in self.my_publishers[key].get("layers", []): + gm.layers.append(layer) + self._map.get_map_with_name_ref(layer, self._map_data) + # After fixing CUDA kernels and removing flips in elevation_mapping.py, no flip needed here + map_data_for_gridmap = self._map_data + arr = Float32MultiArray() + arr.layout = MAL() + # Keep original dimension order but fix strides + arr.layout.dim.append(MAD(label="column_index", size=map_data_for_gridmap.shape[1], stride=map_data_for_gridmap.shape[0] * map_data_for_gridmap.shape[1])) + arr.layout.dim.append(MAD(label="row_index", size=map_data_for_gridmap.shape[0], stride=map_data_for_gridmap.shape[0])) + arr.data = map_data_for_gridmap.flatten().tolist() + gm.data.append(arr) + + gm.outer_start_index = 0 + gm.inner_start_index = 0 + self._publishers_dict[key].publish(gm) + + def safe_lookup_transform(self, target_frame, source_frame, time): + try: + return self._tf_buffer.lookup_transform( + target_frame, + source_frame, + time + ) + except tf2_ros.ExtrapolationException: + return self._tf_buffer.lookup_transform( + target_frame, + source_frame, + rclpy.time.Time() + ) + + def image_callback(self, camera_msg: Image, camera_info_msg: CameraInfo, sub_key: str) -> None: + self._last_t = camera_msg.header.stamp + try: + semantic_img = self.cv_bridge.imgmsg_to_cv2(camera_msg, desired_encoding="passthrough") + except: + return + if len(semantic_img.shape) != 2: + semantic_img = 
[semantic_img[:, :, k] for k in range(semantic_img.shape[2])] + else: + semantic_img = [semantic_img] + + K = np.array(camera_info_msg.k, dtype=np.float32).reshape(3, 3) + D = np.array(camera_info_msg.d, dtype=np.float32).reshape(-1, 1) + + transform_camera_to_map = self.safe_lookup_transform( + self.map_frame, + camera_msg.header.frame_id, + camera_msg.header.stamp + ) + t = transform_camera_to_map.transform.translation + q = transform_camera_to_map.transform.rotation + t_np = np.array([t.x, t.y, t.z], dtype=np.float32) + R = quaternion_matrix([q.x, q.y, q.z, q.w])[:3, :3].astype(np.float32) + self._map.input_image( + sub_key, semantic_img, R, t_np, K, D, + camera_info_msg.height, camera_info_msg.width + ) + self._image_process_counter += 1 + + def pointcloud_callback(self, msg: PointCloud2, sub_key: str) -> None: + self._last_t = msg.header.stamp + # self.get_logger().info(f"Received pointcloud with {msg.width} points") + additional_channels = self.param.subscriber_cfg[sub_key].get("channels", []) + channels = ["x", "y", "z"] + additional_channels + try: + points = rnp.numpify(msg) + except Exception as e: + self.get_logger().warn(f"Failed to numpify point cloud: {e}") + return + + # Check if points is empty - handle both structured array and dict cases + if points is None: + return + + # Handle different data structures from rnp.numpify + if isinstance(points, dict): + # If it's a dict, check if it has data + if not points or (len(points) == 0): + return + # Check for x,y,z keys or xyz field in dict + if 'xyz' in points: + # Handle combined xyz field (common in Livox lidars) + xyz_data = points['xyz'] + if len(xyz_data) == 0: + return + elif 'x' in points: + # Handle separate x,y,z fields + if len(points['x']) == 0: + return + else: + self.get_logger().warn(f"Point cloud dict missing 'x' or 'xyz' field. 
Available fields: {list(points.keys())}") + return + else: + # It's a structured numpy array + if points.size == 0: + return + frame_sensor_id = msg.header.frame_id + transform_sensor_to_map = self.safe_lookup_transform( + self.map_frame, + frame_sensor_id, + msg.header.stamp + ) + t = transform_sensor_to_map.transform.translation + q = transform_sensor_to_map.transform.rotation + t_np = np.array([t.x, t.y, t.z], dtype=np.float32) + R = quaternion_matrix([q.x, q.y, q.z, q.w])[:3, :3].astype(np.float32) + + # Extract xyz points based on data structure + if isinstance(points, dict): + # If points is a dict, manually construct xyz array + if 'xyz' in points: + # Handle combined xyz field (common in Livox lidars) + xyz_array = np.array(points['xyz']) + if xyz_array.ndim == 2 and xyz_array.shape[1] == 3: + pts = xyz_array + elif xyz_array.ndim == 1: + # Reshape if flattened + pts = xyz_array.reshape(-1, 3) + else: + # Try to extract x, y, z from the structure + pts = xyz_array[:, :3] if xyz_array.shape[1] >= 3 else xyz_array + elif 'x' in points and 'y' in points and 'z' in points: + # Handle separate x,y,z fields + x = np.array(points['x']).flatten() + y = np.array(points['y']).flatten() + z = np.array(points['z']).flatten() + pts = np.column_stack((x, y, z)) + else: + # Fallback: try to use any available method + self.get_logger().warn(f"Unexpected point cloud structure, attempting fallback") + return + + # Append additional channels + for channel in additional_channels: + if channel in points: + data = np.array(points[channel]).flatten() + if data.ndim == 1: + data = data[:, np.newaxis] + pts = np.hstack((pts, data)) + else: + # Use standard method for structured arrays + pts = rnp.point_cloud2.get_xyz_points(points) + # TODO: This is probably expensive. 
Consider modifying rnp or input_pointcloud() + # Append additional channels to pts + for channel in additional_channels: + if hasattr(points, 'dtype') and hasattr(points.dtype, 'names') and channel in points.dtype.names: + data = points[channel].flatten() + if data.ndim == 1: + data = data[:, np.newaxis] + pts = np.hstack((pts, data)) + self._map.input_pointcloud(pts, channels, R, t_np, 0, 0) + self._pointcloud_process_counter += 1 + + def pose_update(self) -> None: + if self._last_t is None: + return + transform = self.safe_lookup_transform( + self.map_frame, + self.base_frame, + self._last_t + ) + t = transform.transform.translation + q = transform.transform.rotation + trans = np.array([t.x, t.y, t.z], dtype=np.float32) + rot = quaternion_matrix([q.x, q.y, q.z, q.w])[:3, :3].astype(np.float32) + self._map.move_to(trans, rot) + self._map_t = t + self._map_q = q + + def update_variance(self) -> None: + self._map.update_variance() + + def update_time(self) -> None: + self._map.update_time() + + def destroy_node(self) -> None: + super().destroy_node() + +def main(args=None) -> None: + rclpy.init(args=args) + node = ElevationMappingNode() + executor = rclpy.executors.SingleThreadedExecutor() + executor.add_node(node) + try: + executor.spin() + except KeyboardInterrupt: + pass + finally: + executor.shutdown() + node.destroy_node() + rclpy.shutdown() + +if __name__ == '__main__': + main() diff --git a/elevation_mapping_cupy/setup.cfg b/elevation_mapping_cupy/setup.cfg new file mode 100644 index 00000000..6ded92fd --- /dev/null +++ b/elevation_mapping_cupy/setup.cfg @@ -0,0 +1,4 @@ +[develop] +script_dir=$base/lib/elevation_mapping_cupy +[install] +install_scripts=$base/lib/elevation_mapping_cupy \ No newline at end of file diff --git a/elevation_mapping_cupy/setup.py b/elevation_mapping_cupy/setup.py new file mode 100644 index 00000000..6cd48a96 --- /dev/null +++ b/elevation_mapping_cupy/setup.py @@ -0,0 +1,35 @@ +from setuptools import setup, find_packages +from glob import glob +import os + +package_name = 'elevation_mapping_cupy' + +setup( + name=package_name, + version='2.0.0', + packages=find_packages(include=[package_name, f'{package_name}.*']), + install_requires=['setuptools'], + zip_safe=True, + author='Your Name', + author_email='your.email@example.com', + maintainer='Your Name', + maintainer_email='your.email@example.com', + description='Elevation mapping on GPU', + license='MIT', + tests_require=['pytest'], + entry_points={ + 'console_scripts': [ + 'elevation_mapping_node.py = elevation_mapping_cupy.elevation_mapping_node:main', + ], + }, + data_files=[ + ('share/ament_index/resource_index/packages',['resource/' + package_name]), + (os.path.join('share', package_name, 'launch'), glob('launch/*.launch.py')), + *[(os.path.join('share', package_name, os.path.dirname(yaml_file)), [yaml_file]) for yaml_file in glob('config/**/*.yaml', recursive=True)], + # also the .*dat files + *[(os.path.join('share', package_name, os.path.dirname(dat_file)), [dat_file]) for dat_file in glob('config/**/*.dat', recursive=True)], + # add rviz files + *[(os.path.join('share', package_name, os.path.dirname(rviz_file)), [rviz_file]) for rviz_file in glob('rviz/**/*.rviz', recursive=True)], + (os.path.join('share', package_name), ['package.xml']), + ], +) diff --git a/elevation_mapping_cupy/src/elevation_mapping_node.cpp b/elevation_mapping_cupy/src/elevation_mapping_node.cpp new file mode 100644 index 00000000..131e25cc --- /dev/null +++ b/elevation_mapping_cupy/src/elevation_mapping_node.cpp @@ -0,0 +1,31 
@@
+//
+// Copyright (c) 2022, Takahiro Miki. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for details.
+//
+
+// Pybind
+#include <pybind11/embed.h>  // everything needed for embedding
+
+// ROS2
+#include <rclcpp/rclcpp.hpp>
+#include "elevation_mapping_cupy/elevation_mapping_ros.hpp"
+
+int main(int argc, char** argv) {
+  rclcpp::init(argc, argv);
+
+  // Allow declaring parameters from the yaml files.
+  rclcpp::NodeOptions options;
+  options.automatically_declare_parameters_from_overrides(true);
+  options.allow_undeclared_parameters(true);
+
+  py::scoped_interpreter guard{};  // start the interpreter and keep it alive
+  auto mapNode = std::make_shared<elevation_mapping_cupy::ElevationMappingNode>(options);
+  py::gil_scoped_release release;
+
+  // TODO: Create a multi-threaded executor.
+  rclcpp::spin(mapNode);
+  rclcpp::shutdown();
+  return 0;
+}
diff --git a/elevation_mapping_cupy/src/elevation_mapping_ros.cpp b/elevation_mapping_cupy/src/elevation_mapping_ros.cpp new file mode 100644 index 00000000..9d18dc4e --- /dev/null +++ b/elevation_mapping_cupy/src/elevation_mapping_ros.cpp @@ -0,0 +1,1035 @@
+//
+// Copyright (c) 2022, Takahiro Miki. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for details.
+//
+
+#include "elevation_mapping_cupy/elevation_mapping_ros.hpp"
+
+// Pybind
+#include <pybind11/embed.h>
+
+// ROS2
+#include <rclcpp/rclcpp.hpp>
+#include <tf2_eigen/tf2_eigen.hpp>
+#include <point_cloud_transport/point_cloud_transport.hpp>
+
+// PCL
+#include <pcl_conversions/pcl_conversions.h>
+
+#include <pcl/filters/voxel_grid.h>
+
+namespace elevation_mapping_cupy {
+
+ElevationMappingNode::ElevationMappingNode(const rclcpp::NodeOptions& options)
+    : Node("elevation_mapping_node", options),
+      node_(std::shared_ptr<rclcpp::Node>(this, [](auto*) {})),
+      // it_(node_),
+      lowpassPosition_(0, 0, 0),
+      lowpassOrientation_(0, 0, 0, 1),
+      positionError_(0),
+      orientationError_(0),
+      positionAlpha_(0.1),
+      orientationAlpha_(0.1),
+      enablePointCloudPublishing_(false),
+      isGridmapUpdated_(false) {
+
+  RCLCPP_INFO(this->get_logger(), "Initializing ElevationMappingNode...");
+
+  tfBroadcaster_ = std::make_shared<tf2_ros::TransformBroadcaster>(*this);  // construct the ROS2 TransformBroadcaster
+  tfBuffer_ = std::make_shared<tf2_ros::Buffer>(this->get_clock());
+  tfListener_ = std::make_shared<tf2_ros::TransformListener>(*tfBuffer_);
+
+  std::string pose_topic, map_frame;
+  std::vector<std::string> map_topics;
+  double recordableFps, updateVarianceFps, timeInterval, updatePoseFps, updateGridMapFps, publishStatisticsFps;
+  bool enablePointCloudPublishing(false);
+
+  py::gil_scoped_acquire acquire;
+
+  this->get_parameter("initialize_frame_id", initialize_frame_id_);
+  this->get_parameter("initialize_tf_offset", initialize_tf_offset_);
+  this->get_parameter("pose_topic", pose_topic);
+  this->get_parameter("map_frame", mapFrameId_);
+  this->get_parameter("base_frame", baseFrameId_);
+  this->get_parameter("corrected_map_frame", correctedMapFrameId_);
+  this->get_parameter("initialize_method", initializeMethod_);
+  this->get_parameter("position_lowpass_alpha", positionAlpha_);
+  this->get_parameter("orientation_lowpass_alpha", orientationAlpha_);
+  this->get_parameter("recordable_fps", recordableFps);
+  this->get_parameter("update_variance_fps", updateVarianceFps);
+  this->get_parameter("time_interval", timeInterval);
+  this->get_parameter("update_pose_fps", updatePoseFps);
+  this->get_parameter("initialize_tf_grid_size", initializeTfGridSize_);
+  this->get_parameter("map_acquire_fps", updateGridMapFps);
+  this->get_parameter("publish_statistics_fps", publishStatisticsFps);
+  this->get_parameter("enable_pointcloud_publishing", enablePointCloudPublishing);
+  this->get_parameter("enable_normal_arrow_publishing", enableNormalArrowPublishing_);
+
this->get_parameter("enable_drift_corrected_TF_publishing", enableDriftCorrectedTFPublishing_); + this->get_parameter("use_initializer_at_start", useInitializerAtStart_); + this->get_parameter("always_clear_with_initializer", alwaysClearWithInitializer_); + this->get_parameter("voxel_filter_size", voxel_filter_size_); + + RCLCPP_INFO(this->get_logger(), "initialize_frame_id: %s", initialize_frame_id_.empty() ? "[]" : initialize_frame_id_[0].c_str()); + RCLCPP_INFO(this->get_logger(), "initialize_tf_offset: [%f, %f, %f, %f]", initialize_tf_offset_[0], initialize_tf_offset_[1], initialize_tf_offset_[2], initialize_tf_offset_[3]); + RCLCPP_INFO(this->get_logger(), "pose_topic: %s", pose_topic.c_str()); + RCLCPP_INFO(this->get_logger(), "map_frame: %s", mapFrameId_.c_str()); + RCLCPP_INFO(this->get_logger(), "base_frame: %s", baseFrameId_.c_str()); + RCLCPP_INFO(this->get_logger(), "corrected_map_frame: %s", correctedMapFrameId_.c_str()); + RCLCPP_INFO(this->get_logger(), "initialize_method: %s", initializeMethod_.c_str()); + RCLCPP_INFO(this->get_logger(), "position_lowpass_alpha: %f", positionAlpha_); + RCLCPP_INFO(this->get_logger(), "orientation_lowpass_alpha: %f", orientationAlpha_); + RCLCPP_INFO(this->get_logger(), "recordable_fps: %f", recordableFps); + RCLCPP_INFO(this->get_logger(), "update_variance_fps: %f", updateVarianceFps); + RCLCPP_INFO(this->get_logger(), "time_interval: %f", timeInterval); + RCLCPP_INFO(this->get_logger(), "update_pose_fps: %f", updatePoseFps); + RCLCPP_INFO(this->get_logger(), "initialize_tf_grid_size: %f", initializeTfGridSize_); + RCLCPP_INFO(this->get_logger(), "map_acquire_fps: %f", updateGridMapFps); + RCLCPP_INFO(this->get_logger(), "publish_statistics_fps: %f", publishStatisticsFps); + RCLCPP_INFO(this->get_logger(), "enable_pointcloud_publishing: %s", enablePointCloudPublishing ? "true" : "false"); + RCLCPP_INFO(this->get_logger(), "enable_normal_arrow_publishing: %s", enableNormalArrowPublishing_ ? "true" : "false"); + RCLCPP_INFO(this->get_logger(), "enable_drift_corrected_TF_publishing: %s", enableDriftCorrectedTFPublishing_ ? "true" : "false"); + RCLCPP_INFO(this->get_logger(), "use_initializer_at_start: %s", useInitializerAtStart_ ? "true" : "false"); + RCLCPP_INFO(this->get_logger(), "always_clear_with_initializer: %s", alwaysClearWithInitializer_ ? "true" : "false"); + RCLCPP_INFO(this->get_logger(), "voxel_filter_size: %f", voxel_filter_size_); + + enablePointCloudPublishing_ = enablePointCloudPublishing; + + map_ = std::make_shared(); + map_->initialize(node_); + + + + std::map subscriber_params, publisher_params; + if (!this->get_parameters("subscribers", subscriber_params)) { + RCLCPP_FATAL(this->get_logger(), "There aren't any subscribers to be configured, the elevation mapping cannot be configured. Exit"); + rclcpp::shutdown(); + } + if (!this->get_parameters("publishers", publisher_params)) { + RCLCPP_FATAL(this->get_logger(), "There aren't any publishers to be configured, the elevation mapping cannot be configured. Exit"); + rclcpp::shutdown(); + } + + + auto unique_sub_names = extract_unique_names(subscriber_params); + for (const auto& sub_name : unique_sub_names) { + std::string data_type; + if(this->get_parameter("subscribers." + sub_name + ".data_type", data_type)){ + // image + if(data_type == "image"){ + std::string camera_topic; + std::string info_topic; + this->get_parameter("subscribers." + sub_name + ".topic_name", camera_topic); + this->get_parameter("subscribers." 
+ sub_name + ".info_name", info_topic); + RCLCPP_INFO(this->get_logger(), "camera_topic %s: %s", sub_name.c_str(), camera_topic.c_str()); + RCLCPP_INFO(this->get_logger(), "info_name %s: %s", sub_name.c_str(), info_topic.c_str()); + + // std::string transport_hint = "compressed"; + // std::size_t ind = camera_topic.find(transport_hint); // Find if compressed is in the topic name + // if (ind != std::string::npos) { + // transport_hint = camera_topic.substr(ind, camera_topic.length()); // Get the hint as the last part + // camera_topic.erase(ind - 1, camera_topic.length()); // We remove the hint from the topic + // } else { + // transport_hint = "raw"; // In the default case we assume raw topic + // } + + std::string key = sub_name; + + + sensor_msgs::msg::CameraInfo img_info; + elevation_map_msgs::msg::ChannelInfo channel_info; + imageInfoReady_[key] = std::make_pair(img_info, false); + imageChannelReady_[key] = std::make_pair(channel_info, false); + // Image subscriber init + + rmw_qos_profile_t sensor_qos_profile = rmw_qos_profile_sensor_data; + auto sensor_qos = rclcpp::QoS(rclcpp::QoSInitialization(sensor_qos_profile.history, sensor_qos_profile.depth), sensor_qos_profile); + + auto img_callback = [this, key](const sensor_msgs::msg::Image::SharedPtr msg) { + this->imageCallback(msg, key); + }; + auto img_sub = this->create_subscription(camera_topic, sensor_qos, img_callback); + imageSubs_.push_back(img_sub); + RCLCPP_INFO(this->get_logger(), "Subscribed to Image topic: %s", camera_topic.c_str()); + // Camera Info subscriber init + auto img_info_callback = [this, key](const sensor_msgs::msg::CameraInfo::SharedPtr msg) { + this->imageInfoCallback(msg, key); + }; + auto img_info_sub = this->create_subscription(info_topic, sensor_qos, img_info_callback); + cameraInfoSubs_.push_back(img_info_sub); + RCLCPP_INFO(this->get_logger(), "Subscribed to ImageInfo topic: %s", info_topic.c_str()); + + std::string channel_info_topic; + if (this->get_parameter("subscribers." + sub_name + ".channel_name", channel_info_topic)) { + // channel subscriber init + imageChannelReady_[key].second = false; + auto img_channel_callback = [this, key](const elevation_map_msgs::msg::ChannelInfo::SharedPtr msg) { + this->imageChannelCallback(msg, key); + }; + auto channel_info_sub = this->create_subscription(channel_info_topic, sensor_qos, img_channel_callback); + channelInfoSubs_.push_back(channel_info_sub); + RCLCPP_INFO(this->get_logger(), "Subscribed to ChannelInfo topic: %s", channel_info_topic.c_str()); + }else{ + imageChannelReady_[key].second = true; + channels_[key].push_back("rgb"); + } + }else if(data_type == "pointcloud"){ + std::string pointcloud_topic; + this->get_parameter("subscribers." 
+ sub_name + ".topic_name", pointcloud_topic); + std::string key = sub_name; + channels_[key].push_back("x"); + channels_[key].push_back("y"); + channels_[key].push_back("z"); + + // rmw_qos_profile_t qos_profile = rmw_qos_profile_default; + // auto qos = rclcpp::QoS(rclcpp::QoSInitialization(qos_profile.history, qos_profile.depth), qos_profile); + + rmw_qos_profile_t sensor_qos_profile = rmw_qos_profile_sensor_data; + auto sensor_qos = rclcpp::QoS(rclcpp::QoSInitialization(sensor_qos_profile.history, sensor_qos_profile.depth), sensor_qos_profile); + + + // point_cloud_transport::Subscriber pct_sub = pct.subscribe( + // "pct/point_cloud", 100, + // [node](const sensor_msgs::msg::PointCloud2::ConstSharedPtr & msg) + // { + // RCLCPP_INFO_STREAM( + // node->get_logger(), + // "Message received, number of points is: " << msg->width * msg->height); + // }, {}); + // point_cloud_transport::Subscriber sub = pct.subscribe(pointcloud_topic, 100, callback, {}, transport_hint.get()) + + + auto callback_transport = [this, key](const sensor_msgs::msg::PointCloud2::ConstSharedPtr msg) { + this->pointcloudtransportCallback(msg, key); + }; + + // Create transport hints (e.g., "draco") + // auto transport_hint = std::make_shared("draco"); + + + // Use PointCloudTransport to create a subscriber + point_cloud_transport::PointCloudTransport pct(node_); + auto sub_transport = pct.subscribe(pointcloud_topic, 100, callback_transport); + + // Add the subscriber to the vector to manage its lifetime + pointcloudtransportSubs_.push_back(sub_transport); + + + // auto callback = [this, key](const sensor_msgs::msg::PointCloud2::SharedPtr msg) { + // this->pointcloudCallback(msg, key); + // }; + + // auto sub = this->create_subscription(pointcloud_topic, sensor_qos, callback); + + // pointcloudSubs_.push_back(sub); + RCLCPP_INFO(this->get_logger(), "Subscribed to PointCloud2 topic: %s", pointcloud_topic.c_str()); + } + } + } + + + + auto unique_pub_names = extract_unique_names(publisher_params); + + + std::string node_name = this->get_name(); + + for (const auto& pub_name : unique_pub_names) { + // Namespacing published topics under node_name + std::string topic_name = node_name + "/" + pub_name; + double fps; + std::vector layers_list; + std::vector basic_layers_list; + + this->get_parameter("publishers." + pub_name + ".layers", layers_list); + this->get_parameter("publishers." + pub_name + ".basic_layers", basic_layers_list); + this->get_parameter("publishers." + pub_name + ".fps", fps); + + if (fps > updateGridMapFps) { + RCLCPP_WARN( + this->get_logger(), + "[ElevationMappingCupy] fps for topic %s is larger than map_acquire_fps (%f > %f). 
The topic data will only be updated at %f "
+          "fps.",
+          topic_name.c_str(), fps, updateGridMapFps, updateGridMapFps);
+    }
+
+    // Make publishers
+    auto pub = this->create_publisher<grid_map_msgs::msg::GridMap>(topic_name, 1);
+    RCLCPP_INFO(this->get_logger(), "Publishing map to topic %s", topic_name.c_str());
+    mapPubs_.push_back(pub);
+
+    // Register map layers
+    map_layers_.push_back(layers_list);
+    map_basic_layers_.push_back(basic_layers_list);
+
+    // Register map fps
+    map_fps_.push_back(fps);
+    map_fps_unique_.insert(fps);
+  }
+
+  setupMapPublishers();
+
+  pointPub_ = this->create_publisher<sensor_msgs::msg::PointCloud2>("elevation_map_points", 1);
+  alivePub_ = this->create_publisher<std_msgs::msg::Empty>("alive", 1);
+  normalPub_ = this->create_publisher<visualization_msgs::msg::MarkerArray>("normal", 1);
+  statisticsPub_ = this->create_publisher<elevation_map_msgs::msg::Statistics>("statistics", 1);
+
+  gridMap_.setFrameId(mapFrameId_);
+
+  rawSubmapService_ = this->create_service<grid_map_msgs::srv::GetGridMap>(
+      "get_raw_submap", std::bind(&ElevationMappingNode::getSubmap, this, std::placeholders::_1, std::placeholders::_2));
+
+  clearMapService_ = this->create_service<std_srvs::srv::Empty>(
+      "clear_map", std::bind(&ElevationMappingNode::clearMap, this, std::placeholders::_1, std::placeholders::_2));
+
+  clearMapWithInitializerService_ = this->create_service<std_srvs::srv::Empty>(
+      "clear_map_with_initializer", std::bind(&ElevationMappingNode::clearMapWithInitializer, this, std::placeholders::_1, std::placeholders::_2));
+
+  initializeMapService_ = this->create_service<elevation_map_msgs::srv::Initialize>(
+      "initialize", std::bind(&ElevationMappingNode::initializeMap, this, std::placeholders::_1, std::placeholders::_2));
+
+  setPublishPointService_ = this->create_service<std_srvs::srv::SetBool>(
+      "set_publish_points", std::bind(&ElevationMappingNode::setPublishPoint, this, std::placeholders::_1, std::placeholders::_2));
+
+  // checkSafetyService_ = this->create_service(
+  //     "check_safety", std::bind(&ElevationMappingNode::checkSafety, this, std::placeholders::_1, std::placeholders::_2));
+
+  if (updateVarianceFps > 0) {
+    double duration = 1.0 / (updateVarianceFps + 0.00001);
+    updateVarianceTimer_ = this->create_wall_timer(std::chrono::duration<double>(duration),
+                                                   std::bind(&ElevationMappingNode::updateVariance, this));
+  }
+  if (timeInterval > 0) {
+    double duration = timeInterval;
+    updateTimeTimer_ = this->create_wall_timer(std::chrono::duration<double>(duration),
+                                               std::bind(&ElevationMappingNode::updateTime, this));
+  }
+  if (updatePoseFps > 0) {
+    double duration = 1.0 / (updatePoseFps + 0.00001);
+    updatePoseTimer_ = this->create_wall_timer(std::chrono::duration<double>(duration),
+                                               std::bind(&ElevationMappingNode::updatePose, this));
+  }
+  if (updateGridMapFps > 0) {
+    double duration = 1.0 / (updateGridMapFps + 0.00001);
+    updateGridMapTimer_ = this->create_wall_timer(std::chrono::duration<double>(duration),
+                                                  std::bind(&ElevationMappingNode::updateGridMap, this));
+  }
+  if (publishStatisticsFps > 0) {
+    double duration = 1.0 / (publishStatisticsFps + 0.00001);
+    publishStatisticsTimer_ = this->create_wall_timer(std::chrono::duration<double>(duration),
+                                                      std::bind(&ElevationMappingNode::publishStatistics, this));
+  }
+  lastStatisticsPublishedTime_ = this->now();
+  RCLCPP_INFO(this->get_logger(), "[ElevationMappingCupy] finished initialization");
+}
+
+// Setup map publishers
+void ElevationMappingNode::setupMapPublishers() {
+  // Find the layers with the highest fps.
+  float max_fps = -1;
+  // Create a timer for each unique map frequency.
+  for (auto fps : map_fps_unique_) {
+    // Which publishers to call in the timer callback
+    std::vector<int> indices;
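+    // One wall timer is created per unique publish rate, and every publisher that
+    // shares that rate is served from the same callback via its index. As a result,
+    // map_layers_all_ accumulates the layer set of the highest-rate publishers,
+    // which is the set that updateGridMap() keeps synchronized from the underlying map.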
+    // If this fps is max, update the synced map layers.
+    if (fps >= max_fps) {
+      max_fps = fps;
+      map_layers_all_.clear();
+    }
+    for (size_t i = 0; i < map_fps_.size(); i++) {
+      if (map_fps_[i] == fps) {
+        indices.push_back(static_cast<int>(i));
+        // If this fps is max, add this publisher's layers.
+        if (fps >= max_fps) {
+          for (const auto& layer : map_layers_[i]) {
+            map_layers_all_.insert(layer);
+          }
+        }
+      }
+    }
+    // Callback function. It publishes to the specific topics.
+    auto cb = [this, indices]() -> void {
+      for (int i : indices) {
+        publishMapOfIndex(i);
+      }
+    };
+    double duration = 1.0 / (fps + 0.00001);
+    mapTimers_.push_back(this->create_wall_timer(std::chrono::duration<double>(duration), cb));
+  }
+}
+
+void ElevationMappingNode::publishMapOfIndex(int index) {
+  // Publish the map layers of index.
+  if (!isGridmapUpdated_) {
+    return;
+  }
+
+  std::unique_ptr<grid_map_msgs::msg::GridMap> msg_ptr;
+  grid_map_msgs::msg::GridMap msg;
+
+  std::vector<std::string> layers;
+
+  {  // Need a continuous lock between adding layers and converting to a message. Otherwise updateGridMap can reset the data not in
+     // map_layers_all_.
+    std::lock_guard<std::mutex> lock(mapMutex_);
+    for (const auto& layer : map_layers_[index]) {
+      const bool is_layer_in_all = map_layers_all_.find(layer) != map_layers_all_.end();
+      if (is_layer_in_all && gridMap_.exists(layer)) {
+        layers.push_back(layer);
+      } else if (map_->exists_layer(layer)) {
+        // Handle layers that are not among the synced layers.
+        ElevationMappingWrapper::RowMatrixXf map_data;
+        map_->get_layer_data(layer, map_data);
+        gridMap_.add(layer, map_data);
+        layers.push_back(layer);
+      }
+    }
+    if (layers.empty()) {
+      return;
+    }
+
+    msg_ptr = grid_map::GridMapRosConverter::toMessage(gridMap_, layers);
+    msg = *msg_ptr;
+  }
+
+  msg.basic_layers = map_basic_layers_[index];
+  mapPubs_[index]->publish(msg);
+}
+
+void ElevationMappingNode::imageInfoCallback(const sensor_msgs::msg::CameraInfo::SharedPtr image_info, const std::string& key) {
+  imageInfoReady_[key] = std::make_pair(*image_info, true);
+  // Find and remove the subscription for this key.
+  auto it = std::find_if(cameraInfoSubs_.begin(), cameraInfoSubs_.end(),
+                         [key](const rclcpp::Subscription<sensor_msgs::msg::CameraInfo>::SharedPtr& sub) {
+                           return sub->get_topic_name() == key;
+                         });
+  if (it != cameraInfoSubs_.end()) {
+    cameraInfoSubs_.erase(it);
+  }
+}
+
+void ElevationMappingNode::imageChannelCallback(const elevation_map_msgs::msg::ChannelInfo::SharedPtr channel_info, const std::string& key) {
+  imageChannelReady_[key] = std::make_pair(*channel_info, true);
+  auto it = std::find_if(channelInfoSubs_.begin(), channelInfoSubs_.end(),
+                         [key](const rclcpp::Subscription<elevation_map_msgs::msg::ChannelInfo>::SharedPtr& sub) {
+                           return sub->get_topic_name() == key;
+                         });
+  if (it != channelInfoSubs_.end()) {
+    channelInfoSubs_.erase(it);
+  }
+}
+
+void ElevationMappingNode::pointcloudtransportCallback(const sensor_msgs::msg::PointCloud2::ConstSharedPtr cloud, const std::string& key) {
+  // Get channels.
+  auto fields = cloud->fields;
+  std::vector<std::string> channels;
+
+  for (size_t it = 0; it < fields.size(); it++) {
+    const auto& field = fields[it];
+    channels.push_back(field.name);
+  }
+  inputPointCloud(cloud, channels);
+
+  // This is used for publishing as statistics.
+  pointCloudProcessCounter_++;
+}
+
+void ElevationMappingNode::pointcloudCallback(const sensor_msgs::msg::PointCloud2::SharedPtr cloud, const std::string& key) {
+  // Get channels.
+  auto fields = cloud->fields;
+  std::vector<std::string> channels;
+
+  for (size_t it = 0; it < fields.size(); it++) {
+    const auto& field = fields[it];
+    channels.push_back(field.name);
+  }
+  inputPointCloud(cloud, channels);
+
+  // This is used for publishing as statistics.
+  pointCloudProcessCounter_++;
+}
+
+void ElevationMappingNode::inputPointCloud(const sensor_msgs::msg::PointCloud2::ConstSharedPtr cloud,
+                                           const std::vector<std::string>& channels) {
+  auto start = this->now();
+  pcl::PCLPointCloud2::Ptr raw_pcl_pc(new pcl::PCLPointCloud2());
+  pcl_conversions::toPCL(*cloud, *raw_pcl_pc);
+
+  // Apply the voxel filtering.
+  pcl::PCLPointCloud2::Ptr pcl_pc(new pcl::PCLPointCloud2());
+  pcl::VoxelGrid<pcl::PCLPointCloud2> voxel_filter;
+  voxel_filter.setInputCloud(raw_pcl_pc);
+  voxel_filter.setLeafSize(voxel_filter_size_, voxel_filter_size_, voxel_filter_size_);
+  voxel_filter.filter(*pcl_pc);
+
+  RCLCPP_DEBUG(this->get_logger(), "Voxel grid filtered point cloud from %d points to %d points.", static_cast<int>(raw_pcl_pc->width * raw_pcl_pc->height), static_cast<int>(pcl_pc->width * pcl_pc->height));
+
+  // Get channels.
+  auto fields = cloud->fields;
+  uint array_dim = channels.size();
+
+  RowMatrixXd points = RowMatrixXd(pcl_pc->width * pcl_pc->height, array_dim);
+
+  for (unsigned int i = 0; i < pcl_pc->width * pcl_pc->height; ++i) {
+    for (unsigned int j = 0; j < channels.size(); ++j) {
+      float temp;
+      uint point_idx = i * pcl_pc->point_step + pcl_pc->fields[j].offset;
+      memcpy(&temp, &pcl_pc->data[point_idx], sizeof(float));
+      points(i, j) = static_cast<double>(temp);
+    }
+  }
+
+  // Get pose of sensor in map frame.
+  geometry_msgs::msg::TransformStamped transformStamped;
+  std::string sensorFrameId = cloud->header.frame_id;
+  auto timeStamp = cloud->header.stamp;
+  Eigen::Affine3d transformationSensorToMap;
+  try {
+    transformStamped = tfBuffer_->lookupTransform(mapFrameId_, sensorFrameId, tf2::TimePointZero);
+    transformationSensorToMap = tf2::transformToEigen(transformStamped);
+  } catch (tf2::TransformException& ex) {
+    RCLCPP_ERROR(this->get_logger(), "%s", ex.what());
+    return;
+  }
+
+  double positionError{0.0};
+  double orientationError{0.0};
+  {
+    std::lock_guard<std::mutex> lock(errorMutex_);
+    positionError = positionError_;
+    orientationError = orientationError_;
+  }
+
+  map_->input(points, channels, transformationSensorToMap.rotation(), transformationSensorToMap.translation(), positionError,
+              orientationError);
+
+  if (enableDriftCorrectedTFPublishing_) {
+    publishMapToOdom(map_->get_additive_mean_error());
+  }
+
+  RCLCPP_DEBUG(this->get_logger(), "ElevationMap processed a point cloud (%i points) in %f sec.", static_cast<int>(points.rows()),
+               (this->now() - start).seconds());
+  RCLCPP_DEBUG(this->get_logger(), "positionError: %f ", positionError);
+  RCLCPP_DEBUG(this->get_logger(), "orientationError: %f ", orientationError);
+}
+
+void ElevationMappingNode::inputImage(const sensor_msgs::msg::Image::ConstSharedPtr& image_msg,
+                                      const sensor_msgs::msg::CameraInfo::ConstSharedPtr& camera_info_msg,
+                                      const std::vector<std::string>& channels) {
+  // Get image.
+  cv::Mat image = cv_bridge::toCvShare(image_msg, image_msg->encoding)->image;
+
+  // Change encoding to RGB/RGBA.
+  if (image_msg->encoding == "bgr8") {
+    cv::cvtColor(image, image, cv::COLOR_BGR2RGB);
+  } else if (image_msg->encoding == "bgra8") {
+    cv::cvtColor(image, image, cv::COLOR_BGRA2RGBA);
+  }
+
+  // Extract camera matrix (K is a row-major 3x3 matrix).
+  Eigen::Map<const Eigen::Matrix<double, 3, 3, Eigen::RowMajor>> cameraMatrix(&camera_info_msg->k[0]);
+
+  // Extract distortion coefficients.
+  Eigen::VectorXd distortionCoeffs;
+  if (!camera_info_msg->d.empty()) {
+    distortionCoeffs = Eigen::Map<const Eigen::VectorXd>(camera_info_msg->d.data(), camera_info_msg->d.size());
+  } else {
+    RCLCPP_WARN(this->get_logger(), "Distortion coefficients are empty.");
+
distortionCoeffs = Eigen::VectorXd::Zero(5); + // return; + } + + // distortion model + std::string distortion_model = camera_info_msg->distortion_model; + + // Get pose of sensor in map frame + geometry_msgs::msg::TransformStamped transformStamped; + std::string sensorFrameId = image_msg->header.frame_id; + auto timeStamp = image_msg->header.stamp; + Eigen::Isometry3d transformationMapToSensor; + try { + transformStamped = tfBuffer_->lookupTransform(sensorFrameId, mapFrameId_, tf2::TimePointZero); + transformationMapToSensor = tf2::transformToEigen(transformStamped); + } catch (tf2::TransformException& ex) { + RCLCPP_ERROR(this->get_logger(), "%s", ex.what()); + return; + } + + // Transform image to vector of Eigen matrices for easy pybind conversion + std::vector image_split; + std::vector multichannel_image; + cv::split(image, image_split); + for (auto img : image_split) { + ColMatrixXf eigen_img; + cv::cv2eigen(img, eigen_img); + multichannel_image.push_back(eigen_img); + } + + // Check if the size of multichannel_image and channels and channel_methods matches. "rgb" counts for 3 layers. + int total_channels = 0; + for (const auto& channel : channels) { + if (channel == "rgb") { + total_channels += 3; + } else { + total_channels += 1; + } + } + if (total_channels != multichannel_image.size()) { + RCLCPP_ERROR(this->get_logger(), "Mismatch in the size of multichannel_image (%d), channels (%d). Please check the input.", multichannel_image.size(), channels.size()); + for (const auto& channel : channels) { + RCLCPP_INFO(this->get_logger(), "Channel: %s", channel.c_str()); + } + return; + } + + // Pass image to pipeline + map_->input_image(multichannel_image, channels, transformationMapToSensor.rotation(), transformationMapToSensor.translation(), cameraMatrix, + distortionCoeffs, distortion_model, image.rows, image.cols); +} + + + +void ElevationMappingNode::imageCallback(const sensor_msgs::msg::Image::SharedPtr image_msg, const std::string& key){ + auto start = this->now(); + + if (!imageInfoReady_[key].second){ + RCLCPP_WARN(this->get_logger(), "CameraInfo for key %s is not available yet.", key.c_str()); + return; + } + + + auto camera_info_msg = std::make_shared(imageInfoReady_[key].first); + if (std::find(channels_[key].begin(), channels_[key].end(), "rgb") != channels_[key].end()){ + inputImage(image_msg, camera_info_msg, channels_[key]); + } + else{ + if (!imageChannelReady_[key].second){ + RCLCPP_WARN(this->get_logger(), "ChannelInfo for key %s is not available yet.", key.c_str()); + return; + } + // Default channels and fusion methods for image is rgb and image_color + std::vector channels; + channels = imageChannelReady_[key].first.channels; + inputImage(image_msg, camera_info_msg, channels); + } + RCLCPP_DEBUG(this->get_logger(), "ElevationMap imageChannelCallback processed an image in %f sec.", (this->now() - start).seconds()); +} + + +void ElevationMappingNode::updatePose() { + geometry_msgs::msg::TransformStamped transformStamped; + const auto& timeStamp = this->now(); + Eigen::Affine3d transformationBaseToMap; + try { + transformStamped = tfBuffer_->lookupTransform(mapFrameId_, baseFrameId_, timeStamp, tf2::durationFromSec(1.0)); + transformationBaseToMap = tf2::transformToEigen(transformStamped); + } catch (tf2::TransformException& ex) { + RCLCPP_ERROR(this->get_logger(), "%s", ex.what()); + return; + } + + // This is to check if the robot is moving. If the robot is not moving, drift compensation is disabled to avoid creating artifacts. 
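+  // positionError_/orientationError_ measure how far the current pose is from its
+  // low-pass-filtered history; they grow while the robot moves (or when the state
+  // estimate jumps) and are read back in inputPointCloud(), which forwards them to
+  // the mapping core together with each scan.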
+ Eigen::Vector3d position(transformStamped.transform.translation.x, + transformStamped.transform.translation.y, + transformStamped.transform.translation.z); + map_->move_to(position, transformationBaseToMap.rotation().transpose()); + Eigen::Vector3d position3(transformStamped.transform.translation.x, + transformStamped.transform.translation.y, + transformStamped.transform.translation.z); + Eigen::Vector4d orientation(transformStamped.transform.rotation.x, + transformStamped.transform.rotation.y, + transformStamped.transform.rotation.z, + transformStamped.transform.rotation.w); + lowpassPosition_ = positionAlpha_ * position3 + (1 - positionAlpha_) * lowpassPosition_; + lowpassOrientation_ = orientationAlpha_ * orientation + (1 - orientationAlpha_) * lowpassOrientation_; + { + std::lock_guard lock(errorMutex_); + positionError_ = (position3 - lowpassPosition_).norm(); + orientationError_ = (orientation - lowpassOrientation_).norm(); + } + + if (useInitializerAtStart_) { + RCLCPP_INFO(this->get_logger(), "Clearing map with initializer."); + initializeWithTF(); + useInitializerAtStart_ = false; + } +} + +void ElevationMappingNode::publishAsPointCloud(const grid_map::GridMap& map) const { + sensor_msgs::msg::PointCloud2 msg; + grid_map::GridMapRosConverter::toPointCloud(map, "elevation", msg); + pointPub_->publish(msg); +} + + +bool ElevationMappingNode::getSubmap( + const std::shared_ptr request, + std::shared_ptr response) { + std::string requestedFrameId = request->frame_id; + Eigen::Isometry3d transformationOdomToMap; + geometry_msgs::msg::Pose pose; + grid_map::Position requestedSubmapPosition(request->position_x, request->position_y); + + + if (requestedFrameId != mapFrameId_) { + geometry_msgs::msg::TransformStamped transformStamped; + + try { + const auto& timeStamp = this->now(); + transformStamped = tfBuffer_->lookupTransform(requestedFrameId, mapFrameId_, timeStamp, tf2::durationFromSec(1.0)); + + + pose.position.x = transformStamped.transform.translation.x; + pose.position.y = transformStamped.transform.translation.y; + pose.position.z = transformStamped.transform.translation.z; + pose.orientation = transformStamped.transform.rotation; + + tf2::fromMsg(pose, transformationOdomToMap); + } catch (tf2::TransformException& ex) { + RCLCPP_ERROR(this->get_logger(), "%s", ex.what()); + return false; + } + Eigen::Vector3d p(request->position_x, request->position_y, 0); + Eigen::Vector3d mapP = transformationOdomToMap.inverse() * p; + requestedSubmapPosition.x() = mapP.x(); + requestedSubmapPosition.y() = mapP.y(); + } + grid_map::Length requestedSubmapLength(request->length_x, request->length_y); + RCLCPP_DEBUG(this->get_logger(), "Elevation submap request: Position x=%f, y=%f, Length x=%f, y=%f.", + requestedSubmapPosition.x(), requestedSubmapPosition.y(), + requestedSubmapLength(0), requestedSubmapLength(1)); + + bool isSuccess; + grid_map::Index index; + grid_map::GridMap subMap; + { + std::lock_guard lock(mapMutex_); + subMap = gridMap_.getSubmap(requestedSubmapPosition, requestedSubmapLength, isSuccess); + } + const auto& length = subMap.getLength(); + if (requestedFrameId != mapFrameId_) { + subMap = subMap.getTransformedMap(transformationOdomToMap, "elevation", requestedFrameId); + } + + if (request->layers.empty()) { + auto msg_ptr = grid_map::GridMapRosConverter::toMessage(subMap); + response->map = *msg_ptr; + } else { + std::vector layers; + for (const auto& layer : request->layers) { + layers.push_back(layer); + } + auto msg_ptr = 
grid_map::GridMapRosConverter::toMessage(subMap, layers); + response->map = *msg_ptr; + + } + + return isSuccess; +} + + + +void ElevationMappingNode::clearMap(const std::shared_ptr request, + std::shared_ptr response) { + + std::lock_guard lock(mapMutex_); + RCLCPP_INFO(this->get_logger(), "Clearing map."); + map_->clear(); + if (alwaysClearWithInitializer_) { + initializeWithTF(); + } +} + +void ElevationMappingNode::clearMapWithInitializer(const std::shared_ptr request, std::shared_ptr response){ + RCLCPP_INFO(this->get_logger(), "Clearing map with initializer"); + map_->clear(); + initializeWithTF(); + +} + +void ElevationMappingNode::initializeWithTF() { + std::vector points; + const auto& timeStamp = this->now(); + int i = 0; + Eigen::Vector3d p; + for (const auto& frame_id : initialize_frame_id_) { + // Get tf from map frame to tf frame + Eigen::Affine3d transformationBaseToMap; + geometry_msgs::msg::TransformStamped transformStamped; + try { + transformStamped = tfBuffer_->lookupTransform(mapFrameId_, frame_id, timeStamp, tf2::durationFromSec(1.0)); + transformationBaseToMap = tf2::transformToEigen(transformStamped); + } catch (tf2::TransformException& ex) { + RCLCPP_ERROR(this->get_logger(), "%s", ex.what()); + return; + } + p = transformationBaseToMap.translation(); + p.z() += initialize_tf_offset_[i]; + points.push_back(p); + i++; + } + if (!points.empty() && points.size() < 3) { + points.emplace_back(p + Eigen::Vector3d(initializeTfGridSize_, initializeTfGridSize_, 0)); + points.emplace_back(p + Eigen::Vector3d(-initializeTfGridSize_, initializeTfGridSize_, 0)); + points.emplace_back(p + Eigen::Vector3d(initializeTfGridSize_, -initializeTfGridSize_, 0)); + points.emplace_back(p + Eigen::Vector3d(-initializeTfGridSize_, -initializeTfGridSize_, 0)); + } + RCLCPP_INFO(this->get_logger(), "Initializing map with points using %s", initializeMethod_.c_str()); + map_->initializeWithPoints(points, initializeMethod_); +} + +// bool ElevationMappingNode::checkSafety(elevation_map_msgs::CheckSafety::Request& request, +// elevation_map_msgs::CheckSafety::Response& response) { +// for (const auto& polygonstamped : request.polygons) { +// if (polygonstamped.polygon.points.empty()) { +// continue; +// } +// std::vector polygon; +// std::vector untraversable_polygon; +// Eigen::Vector3d result; +// result.setZero(); +// const auto& polygonFrameId = polygonstamped.header.frame_id; +// const auto& timeStamp = polygonstamped.header.stamp; +// double polygon_z = polygonstamped.polygon.points[0].z; + +// // Get tf from map frame to polygon frame +// if (mapFrameId_ != polygonFrameId) { +// Eigen::Affine3d transformationBaseToMap; +// tf::StampedTransform transformTf; +// try { +// transformListener_.waitForTransform(mapFrameId_, polygonFrameId, timeStamp, ros::Duration(1.0)); +// transformListener_.lookupTransform(mapFrameId_, polygonFrameId, timeStamp, transformTf); +// poseTFToEigen(transformTf, transformationBaseToMap); +// } catch (tf::TransformException& ex) { +// ROS_ERROR("%s", ex.what()); +// return false; +// } +// for (const auto& p : polygonstamped.polygon.points) { +// const auto& pvector = Eigen::Vector3d(p.x, p.y, p.z); +// const auto transformed_p = transformationBaseToMap * pvector; +// polygon.emplace_back(Eigen::Vector2d(transformed_p.x(), transformed_p.y())); +// } +// } else { +// for (const auto& p : polygonstamped.polygon.points) { +// polygon.emplace_back(Eigen::Vector2d(p.x, p.y)); +// } +// } + +// map_.get_polygon_traversability(polygon, result, untraversable_polygon); + 
+//     geometry_msgs::PolygonStamped untraversable_polygonstamped;
+//     untraversable_polygonstamped.header.stamp = ros::Time::now();
+//     untraversable_polygonstamped.header.frame_id = mapFrameId_;
+//     for (const auto& p : untraversable_polygon) {
+//       geometry_msgs::Point32 point;
+//       point.x = static_cast<float>(p.x());
+//       point.y = static_cast<float>(p.y());
+//       point.z = static_cast<float>(polygon_z);
+//       untraversable_polygonstamped.polygon.points.push_back(point);
+//     }
+//     // traversability_result;
+//     response.is_safe.push_back(bool(result[0] > 0.5));
+//     response.traversability.push_back(result[1]);
+//     response.untraversable_polygons.push_back(untraversable_polygonstamped);
+//   }
+//   return true;
+// }
+
+void ElevationMappingNode::setPublishPoint(const std::shared_ptr<std_srvs::srv::SetBool::Request> request,
+                                           std::shared_ptr<std_srvs::srv::SetBool::Response> response) {
+  enablePointCloudPublishing_ = request->data;
+  response->success = true;
+}
+
+void ElevationMappingNode::updateVariance() {
+  map_->update_variance();
+}
+
+void ElevationMappingNode::updateTime() {
+  map_->update_time();
+}
+
+void ElevationMappingNode::publishStatistics() {
+  auto now = this->now();
+  double dt = (now - lastStatisticsPublishedTime_).seconds();
+  lastStatisticsPublishedTime_ = now;
+  elevation_map_msgs::msg::Statistics msg;
+  msg.header.stamp = now;
+  if (dt > 0.0) {
+    msg.pointcloud_process_fps = pointCloudProcessCounter_ / dt;
+  }
+  pointCloudProcessCounter_ = 0;
+  statisticsPub_->publish(msg);
+}
+
+void ElevationMappingNode::updateGridMap() {
+  std::vector<std::string> layers(map_layers_all_.begin(), map_layers_all_.end());
+  std::lock_guard<std::mutex> lock(mapMutex_);
+  map_->get_grid_map(gridMap_, layers);
+  gridMap_.setTimestamp(this->now().nanoseconds());
+  alivePub_->publish(std_msgs::msg::Empty());
+
+  // Mostly debug purpose
+  if (enablePointCloudPublishing_) {
+    publishAsPointCloud(gridMap_);
+  }
+  if (enableNormalArrowPublishing_) {
+    publishNormalAsArrow(gridMap_);
+  }
+  isGridmapUpdated_ = true;
+}
+
+void ElevationMappingNode::initializeMap(const std::shared_ptr<elevation_map_msgs::srv::Initialize::Request> request,
+                                         std::shared_ptr<elevation_map_msgs::srv::Initialize::Response> response) {
+  // If initialize method is points
+  if (request->type == elevation_map_msgs::srv::Initialize::Request::POINTS) {
+    std::vector<Eigen::Vector3d> points;
+    for (const auto& point : request->points) {
+      const auto& pointFrameId = point.header.frame_id;
+      const auto& timeStamp = point.header.stamp;
+      const auto& pvector = Eigen::Vector3d(point.point.x, point.point.y, point.point.z);
+
+      // Get tf from map frame to points' frame
+      if (mapFrameId_ != pointFrameId) {
+        Eigen::Affine3d transformationBaseToMap;
+        geometry_msgs::msg::TransformStamped transformStamped;
+        try {
+          transformStamped = tfBuffer_->lookupTransform(mapFrameId_, pointFrameId, timeStamp, tf2::durationFromSec(1.0));
+          transformationBaseToMap = tf2::transformToEigen(transformStamped);
+        } catch (tf2::TransformException& ex) {
+          RCLCPP_ERROR(this->get_logger(), "%s", ex.what());
+          response->success = false;
+          return;
+        }
+        const auto transformed_p = transformationBaseToMap * pvector;
+        points.push_back(transformed_p);
+      } else {
+        points.push_back(pvector);
+      }
+    }
+    std::string method;
+    switch (request->method) {
+      case elevation_map_msgs::srv::Initialize::Request::NEAREST:
+        method = "nearest";
+        break;
+      case elevation_map_msgs::srv::Initialize::Request::LINEAR:
+        method = "linear";
+        break;
+      case elevation_map_msgs::srv::Initialize::Request::CUBIC:
+        method = "cubic";
+        break;
+    }
+    RCLCPP_INFO(this->get_logger(), "Initializing map with points using %s", method.c_str());
+    map_->initializeWithPoints(points, method);
+  }
+ 
response->success = true; +} + +void ElevationMappingNode::publishNormalAsArrow(const grid_map::GridMap& map) const { + auto startTime = this->now(); + + const auto& normalX = map["normal_x"]; + const auto& normalY = map["normal_y"]; + const auto& normalZ = map["normal_z"]; + double scale = 0.1; + + visualization_msgs::msg::MarkerArray markerArray; + // For each cell in map. + for (grid_map::GridMapIterator iterator(map); !iterator.isPastEnd(); ++iterator) { + if (!map.isValid(*iterator, "elevation")) { + continue; + } + grid_map::Position3 p; + map.getPosition3("elevation", *iterator, p); + Eigen::Vector3d start = p; + const auto i = iterator.getLinearIndex(); + Eigen::Vector3d normal(normalX(i), normalY(i), normalZ(i)); + Eigen::Vector3d end = start + normal * scale; + if (normal.norm() < 0.1) { + continue; + } + markerArray.markers.push_back(vectorToArrowMarker(start, end, i)); + } + normalPub_->publish(markerArray); + +} + + + +visualization_msgs::msg::Marker ElevationMappingNode::vectorToArrowMarker(const Eigen::Vector3d& start, const Eigen::Vector3d& end, + const int id) const { + visualization_msgs::msg::Marker marker; + marker.header.frame_id = mapFrameId_; + marker.header.stamp = this->now(); + marker.ns = "normal"; + marker.id = id; + marker.type = visualization_msgs::msg::Marker::ARROW; + marker.action = visualization_msgs::msg::Marker::ADD; + marker.points.resize(2); + marker.points[0].x = start.x(); + marker.points[0].y = start.y(); + marker.points[0].z = start.z(); + marker.points[1].x = end.x(); + marker.points[1].y = end.y(); + marker.points[1].z = end.z(); + marker.pose.orientation.x = 0.0; + marker.pose.orientation.y = 0.0; + marker.pose.orientation.z = 0.0; + marker.pose.orientation.w = 1.0; + marker.scale.x = 0.01; + marker.scale.y = 0.01; + marker.scale.z = 0.01; + marker.color.a = 1.0; // Don't forget to set the alpha! + marker.color.r = 0.0; + marker.color.g = 1.0; + marker.color.b = 0.0; + + return marker; +} + + +void ElevationMappingNode::publishMapToOdom(double error) { + geometry_msgs::msg::TransformStamped transform_stamped; + transform_stamped.header.stamp = this->now(); + transform_stamped.header.frame_id = mapFrameId_; + transform_stamped.child_frame_id = correctedMapFrameId_; + transform_stamped.transform.translation.x = 0.0; + transform_stamped.transform.translation.y = 0.0; + transform_stamped.transform.translation.z = error; + + tf2::Quaternion q; + q.setRPY(0, 0, 0); + transform_stamped.transform.rotation.x = q.x(); + transform_stamped.transform.rotation.y = q.y(); + transform_stamped.transform.rotation.z = q.z(); + transform_stamped.transform.rotation.w = q.w(); + + tfBroadcaster_->sendTransform(transform_stamped); +} + + + +} // namespace elevation_mapping_cupy diff --git a/elevation_mapping_cupy/src/elevation_mapping_wrapper.cpp b/elevation_mapping_cupy/src/elevation_mapping_wrapper.cpp new file mode 100644 index 00000000..93beb0b4 --- /dev/null +++ b/elevation_mapping_cupy/src/elevation_mapping_wrapper.cpp @@ -0,0 +1,364 @@ +// +// Copyright (c) 2022, Takahiro Miki. All rights reserved. +// Licensed under the MIT license. See LICENSE file in the project root for details. 
+//
+
+#include "elevation_mapping_cupy/elevation_mapping_wrapper.hpp"
+
+// Pybind
+#include <pybind11/embed.h>
+
+// PCL
+#include <pcl/common/projection_matrix.h>
+
+// ROS
+#include <rclcpp/rclcpp.hpp>
+#include <geometry_msgs/msg/point32.hpp>
+
+#include <utility>
+
+namespace elevation_mapping_cupy {
+
+std::vector<std::string> extract_unique_names(const std::map<std::string, rclcpp::Parameter>& subscriber_params) {
+  std::set<std::string> unique_names_set;
+  for (const auto& param : subscriber_params) {
+    std::size_t pos = param.first.find('.');
+    if (pos != std::string::npos) {
+      std::string name = param.first.substr(0, pos);
+      unique_names_set.insert(name);
+    }
+  }
+  return std::vector<std::string>(unique_names_set.begin(), unique_names_set.end());
+}
+
+ElevationMappingWrapper::ElevationMappingWrapper() {}
+
+void ElevationMappingWrapper::initialize(const std::shared_ptr<rclcpp::Node>& node) {
+  if (!node) {
+    RCLCPP_ERROR(rclcpp::get_logger("rclcpp"), "Invalid node shared pointer");
+    return;
+  }
+  node_ = node;
+  // Add the elevation_mapping_cupy path to sys.path
+  auto threading = py::module::import("threading");
+  py::gil_scoped_acquire acquire;
+
+  auto elevation_mapping = py::module::import("elevation_mapping_cupy.elevation_mapping");
+  auto parameter = py::module::import("elevation_mapping_cupy.parameter");
+  param_ = parameter.attr("Parameter")();
+  setParameters();
+  map_ = elevation_mapping.attr("ElevationMap")(param_);
+  RCLCPP_INFO(node_->get_logger(), "ElevationMappingWrapper has been initialized");
+}
+
+// /**
+//  * Load ros parameters into Parameter class.
+//  * Search for the same name within the name space.
+//  */
+void ElevationMappingWrapper::setParameters() {
+  // Get all parameter names and types.
+  py::list paramNames = param_.attr("get_names")();
+  py::list paramTypes = param_.attr("get_types")();
+  py::gil_scoped_acquire acquire;
+
+  // Try to find the parameter in the ROS parameter server.
+  // If there was a parameter, set it to the Parameter variable.
+  for (size_t i = 0; i < paramNames.size(); i++) {
+    std::string type = py::cast<std::string>(paramTypes[i]);
+    std::string name = py::cast<std::string>(paramNames[i]);
+    RCLCPP_INFO(node_->get_logger(), "type: %s, name: %s", type.c_str(), name.c_str());
+    if (type == "float") {
+      float param;
+      if (node_->get_parameter(name, param)) {
+        RCLCPP_INFO(node_->get_logger(), "Retrieved parameter: %s value: %f", name.c_str(), param);
+        param_.attr("set_value")(name, param);
+        RCLCPP_INFO(node_->get_logger(), "Set parameter: %s value: %f", name.c_str(), param);
+      } else {
+        RCLCPP_WARN(node_->get_logger(), "Parameter not found or invalid: %s", name.c_str());
+      }
+    } else if (type == "str") {
+      std::string param;
+      if (node_->get_parameter(name, param)) {
+        RCLCPP_INFO(node_->get_logger(), "Retrieved parameter: %s value: %s", name.c_str(), param.c_str());
+        param_.attr("set_value")(name, param);
+        RCLCPP_INFO(node_->get_logger(), "Set parameter: %s value: %s", name.c_str(), param.c_str());
+      } else {
+        RCLCPP_WARN(node_->get_logger(), "Parameter not found or invalid: %s", name.c_str());
+      }
+    } else if (type == "bool") {
+      bool param;
+      if (node_->get_parameter(name, param)) {
+        RCLCPP_INFO(node_->get_logger(), "Retrieved parameter: %s value: %s", name.c_str(), param ? "true" : "false");
+        param_.attr("set_value")(name, param);
+        RCLCPP_INFO(node_->get_logger(), "Set parameter: %s value: %s", name.c_str(), param ? "true" : "false");
+      } else {
+        RCLCPP_WARN(node_->get_logger(), "Parameter not found or invalid: %s", name.c_str());
+      }
+    } else if (type == "int") {
+      int param;
+      if (node_->get_parameter(name, param)) {
+        RCLCPP_INFO(node_->get_logger(), "Retrieved parameter: %s value: %d", name.c_str(), param);
+        param_.attr("set_value")(name, param);
+        RCLCPP_INFO(node_->get_logger(), "Set parameter: %s value: %d", name.c_str(), param);
+      } else {
+        RCLCPP_WARN(node_->get_logger(), "Parameter not found or invalid: %s", name.c_str());
+      }
+    }
+  }
+
+  py::dict sub_dict;
+  // rclcpp::Parameter subscribers;
+  std::vector<std::string> parameter_prefixes;
+  auto parameters = node_->list_parameters(parameter_prefixes, 2);  // List all parameters with a maximum depth of 2
+
+  std::map<std::string, rclcpp::Parameter> subscriber_params;
+  if (!node_->get_parameters("subscribers", subscriber_params)) {
+    RCLCPP_FATAL(node_->get_logger(), "There aren't any subscribers to be configured, the elevation mapping cannot be configured. Exit");
+    rclcpp::shutdown();
+  }
+  auto unique_sub_names = extract_unique_names(subscriber_params);
+  for (const auto& name : unique_sub_names) {
+    const char* const name_c = name.c_str();
+    if (!sub_dict.contains(name_c)) {
+      sub_dict[name_c] = py::dict();
+    }
+    std::string topic_name;
+    if (node_->get_parameter("subscribers." + name + ".topic_name", topic_name)) {
+      const char* topic_name_cstr = "topic_name";
+      sub_dict[name_c][topic_name_cstr] = static_cast<std::string>(topic_name);
+      std::string data_type;
+      if (node_->get_parameter("subscribers." + name + ".data_type", data_type)) {
+        const char* data_type_cstr = "data_type";
+        sub_dict[name_c][data_type_cstr] = static_cast<std::string>(data_type);
+      }
+      std::string info_name;
+      if (node_->get_parameter("subscribers." + name + ".info_name", info_name)) {
+        const char* info_name_cstr = "info_name";
+        sub_dict[name_c][info_name_cstr] = static_cast<std::string>(info_name);
+      }
+      std::string channel_name;
+      if (node_->get_parameter("subscribers." + name + ".channel_name", channel_name)) {
+        const char* channel_name_cstr = "channel_name";
+        sub_dict[name_c][channel_name_cstr] = static_cast<std::string>(channel_name);
+      }
+    }
+  }
+
+  param_.attr("subscriber_cfg") = sub_dict;
+
+  // point cloud channel fusion
+  std::map<std::string, rclcpp::Parameter> pointcloud_channel_fusions_params;
+  if (node_->get_parameters("pointcloud_channel_fusions", pointcloud_channel_fusions_params)) {
+    py::dict pointcloud_channel_fusion_dict;
+    for (const auto& param : pointcloud_channel_fusions_params) {
+      std::string param_name = param.first;
+      std::string param_value = param.second.as_string();
+      // Extract the string after "pointcloud_channel_fusions."
+      pointcloud_channel_fusion_dict[param_name.c_str()] = param_value;
+    }
+    // Print the dictionary for debugging
+    for (auto item : pointcloud_channel_fusion_dict) {
+      RCLCPP_INFO(node_->get_logger(), "pointcloud_channel_fusions Key: %s, Value: %s", std::string(py::str(item.first)).c_str(),
+                  std::string(py::str(item.second)).c_str());
+    }
+  } else {
+    RCLCPP_WARN(node_->get_logger(), "No parameters found for 'pointcloud_channel_fusions'");
+  }
+
+  // image channel fusion
+  std::map<std::string, rclcpp::Parameter> image_channel_fusions_params;
+  if (node_->get_parameters("image_channel_fusions", image_channel_fusions_params)) {
+    py::dict image_channel_fusion_dict;
+    for (const auto& param : image_channel_fusions_params) {
+      std::string param_name = param.first;
+      std::string param_value = param.second.as_string();
+      // Extract the string after "image_channel_fusions."
+      image_channel_fusion_dict[param_name.c_str()] = param_value;
+    }
+    // Print the dictionary for debugging
+    for (auto item : image_channel_fusion_dict) {
+      RCLCPP_INFO(node_->get_logger(), "image_channel_fusions Key: %s, Value: %s", std::string(py::str(item.first)).c_str(),
+                  std::string(py::str(item.second)).c_str());
+    }
+  } else {
+    RCLCPP_WARN(node_->get_logger(), "No parameters found for 'image_channel_fusions'");
+  }
+
+  // Update the cell_n parameters based on the map length and resolution
+  RCLCPP_INFO(node_->get_logger(), "Updating cell_n parameters based on map length and resolution");
+  param_.attr("update")();
+  resolution_ = py::cast<double>(param_.attr("get_value")("resolution"));
+  map_length_ = py::cast<double>(param_.attr("get_value")("true_map_length"));
+  map_n_ = py::cast<int>(param_.attr("get_value")("true_cell_n"));
+  RCLCPP_INFO(node_->get_logger(), "cell_n: %d", map_n_);
+  RCLCPP_INFO(node_->get_logger(), "resolution: %f", resolution_);
+  RCLCPP_INFO(node_->get_logger(), "true_map_length: %f", map_length_);
+
+  enable_normal_color_ = node_->get_parameter("enable_normal_color").as_bool();
+}
+
+void ElevationMappingWrapper::input(const RowMatrixXd& points, const std::vector<std::string>& channels, const RowMatrixXd& R,
+                                    const Eigen::VectorXd& t, const double positionNoise, const double orientationNoise) {
+  py::gil_scoped_acquire acquire;
+  map_.attr("input_pointcloud")(Eigen::Ref<const RowMatrixXd>(points), channels, Eigen::Ref<const RowMatrixXd>(R),
+                                Eigen::Ref<const Eigen::VectorXd>(t), positionNoise, orientationNoise);
+}
+
+void ElevationMappingWrapper::input_image(const std::vector<ColMatrixXf>& multichannel_image, const std::vector<std::string>& channels,
+                                          const RowMatrixXd& R, const Eigen::VectorXd& t, const RowMatrixXd& cameraMatrix,
+                                          const Eigen::VectorXd& D, const std::string distortion_model, int height, int width) {
+  py::gil_scoped_acquire acquire;
+  map_.attr("input_image")(multichannel_image, channels, Eigen::Ref<const RowMatrixXd>(R), Eigen::Ref<const Eigen::VectorXd>(t),
+                           Eigen::Ref<const RowMatrixXd>(cameraMatrix), Eigen::Ref<const Eigen::VectorXd>(D), distortion_model, height,
+                           width);
+}
+
+void ElevationMappingWrapper::move_to(const Eigen::VectorXd& p, const RowMatrixXd& R) {
+  py::gil_scoped_acquire acquire;
+  map_.attr("move_to")(Eigen::Ref<const Eigen::VectorXd>(p), Eigen::Ref<const RowMatrixXd>(R));
+}
+
+void ElevationMappingWrapper::clear() {
+  py::gil_scoped_acquire acquire;
+  map_.attr("clear")();
+}
+
+double ElevationMappingWrapper::get_additive_mean_error() {
+  py::gil_scoped_acquire acquire;
+  return map_.attr("get_additive_mean_error")().cast<double>();
+}
+
+bool ElevationMappingWrapper::exists_layer(const std::string& layerName) {
+  py::gil_scoped_acquire acquire;
+  return py::cast<bool>(map_.attr("exists_layer")(layerName));
+}
+
+void ElevationMappingWrapper::get_layer_data(const std::string& layerName, RowMatrixXf& map) {
+  py::gil_scoped_acquire acquire;
+  map = RowMatrixXf(map_n_, map_n_);
+  map_.attr("get_map_with_name_ref")(layerName, Eigen::Ref<RowMatrixXf>(map));
+}
+
+void ElevationMappingWrapper::get_grid_map(grid_map::GridMap& gridMap, const std::vector<std::string>& requestLayerNames) {
+  std::vector<std::string> basicLayerNames;
+  std::vector<std::string> layerNames = requestLayerNames;
+  std::vector<std::string> selection;
+  for (const auto& layerName : layerNames) {
+    if (layerName == "elevation") {
+      basicLayerNames.push_back("elevation");
+    }
+  }
+
+  RowMatrixXd pos(1, 3);
+  py::gil_scoped_acquire acquire;
+  map_.attr("get_position")(Eigen::Ref<RowMatrixXd>(pos));
+  grid_map::Position position(pos(0, 0), pos(0, 1));
+  grid_map::Length length(map_length_, map_length_);
+  gridMap.setGeometry(length, resolution_, position);
+  std::vector<RowMatrixXf> maps;
+
+  for (const auto& layerName : layerNames) {
+    bool exists = map_.attr("exists_layer")(layerName).cast<bool>();
+    if (exists) {
+      RowMatrixXf map(map_n_, map_n_);
+      map_.attr("get_map_with_name_ref")(layerName, Eigen::Ref<RowMatrixXf>(map));
+      gridMap.add(layerName, map);
+    }
+  }
+  if (enable_normal_color_) {
+    RowMatrixXf normal_x(map_n_, map_n_);
+    RowMatrixXf normal_y(map_n_, map_n_);
+    RowMatrixXf normal_z(map_n_, map_n_);
+    map_.attr("get_normal_ref")(Eigen::Ref<RowMatrixXf>(normal_x), Eigen::Ref<RowMatrixXf>(normal_y), Eigen::Ref<RowMatrixXf>(normal_z));
+    gridMap.add("normal_x", normal_x);
+    gridMap.add("normal_y", normal_y);
+    gridMap.add("normal_z", normal_z);
+  }
+  gridMap.setBasicLayers(basicLayerNames);
+  if (enable_normal_color_) {
+    addNormalColorLayer(gridMap);
+  }
+}
+
+void ElevationMappingWrapper::get_polygon_traversability(std::vector<Eigen::Vector2d>& polygon, Eigen::Vector3d& result,
+                                                         std::vector<Eigen::Vector2d>& untraversable_polygon) {
+  if (polygon.size() < 3) {
+    return;
+  }
+  RowMatrixXf polygon_m(polygon.size(), 2);
+  int i = 0;
+  for (auto& p : polygon) {
+    polygon_m(i, 0) = p.x();
+    polygon_m(i, 1) = p.y();
+    i++;
+  }
+  py::gil_scoped_acquire acquire;
+  const int untraversable_polygon_num =
+      map_.attr("get_polygon_traversability")(Eigen::Ref<const RowMatrixXf>(polygon_m), Eigen::Ref<Eigen::Vector3d>(result)).cast<int>();
+
+  untraversable_polygon.clear();
+  if (untraversable_polygon_num > 0) {
+    RowMatrixXf untraversable_polygon_m(untraversable_polygon_num, 2);
+    map_.attr("get_untraversable_polygon")(Eigen::Ref<RowMatrixXf>(untraversable_polygon_m));
+    for (int j = 0; j < untraversable_polygon_num; j++) {
+      Eigen::Vector2d p;
+      p.x() = untraversable_polygon_m(j, 0);
+      p.y() = untraversable_polygon_m(j, 1);
+      untraversable_polygon.push_back(p);
+    }
+  }
+}
+
+void ElevationMappingWrapper::initializeWithPoints(std::vector<Eigen::Vector3d>& points, std::string method) {
+  RowMatrixXd points_m(points.size(), 3);
+  int i = 0;
+  for (auto& p : points) {
+    points_m(i, 0) = p.x();
+    points_m(i, 1) = p.y();
+    points_m(i, 2) = p.z();
+    i++;
+  }
+  py::gil_scoped_acquire acquire;
+  map_.attr("initialize_map")(Eigen::Ref<const RowMatrixXd>(points_m), method);
+}
+
+void ElevationMappingWrapper::addNormalColorLayer(grid_map::GridMap& map) {
+  const auto& normalX = map["normal_x"];
+  const auto& normalY = map["normal_y"];
+  const auto& normalZ = map["normal_z"];
+
+  map.add("color");
+  auto& color = map["color"];
+
+  // X: -1 to +1 : Red: 0 to 255
+  // Y: -1 to +1 : Green: 0 to 255
+  // Z: 0 to 1 : Blue: 128 to 255
+
+  // For each cell in map.
+  for (size_t i = 0; i < color.size(); ++i) {
+    const Eigen::Vector3f colorVector((normalX(i) + 1.0) / 2.0, (normalY(i) + 1.0) / 2.0, (normalZ(i)));
+    Eigen::Vector3i intColorVector = (colorVector * 255.0).cast<int>();
+    grid_map::colorVectorToValue(intColorVector, color(i));
+  }
+}
+
+void ElevationMappingWrapper::update_variance() {
+  py::gil_scoped_acquire acquire;
+  map_.attr("update_variance")();
+}
+
+void ElevationMappingWrapper::update_time() {
+  py::gil_scoped_acquire acquire;
+  map_.attr("update_time")();
+}
+
+}  // namespace elevation_mapping_cupy
diff --git a/grid_map_filters_rsl/CMakeLists.txt b/grid_map_filters_rsl/CMakeLists.txt
deleted file mode 100644
index 444929cb..00000000
--- a/grid_map_filters_rsl/CMakeLists.txt
+++ /dev/null
@@ -1,126 +0,0 @@
-# Project configuration
-cmake_minimum_required (VERSION 3.5.1)
-project(grid_map_filters_rsl)
-
-set(CMAKE_CXX_STANDARD 14)
-add_compile_options(-Wall -Wextra -Wpedantic)
-set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
-
-set(CATKIN_PACKAGE_DEPENDENCIES
-  grid_map_cv
-  grid_map_core
-)
-
-find_package(catkin REQUIRED
-  COMPONENTS
-    ${CATKIN_PACKAGE_DEPENDENCIES}
-)
-
-find_package(Eigen3 REQUIRED)
-
-###################################
-## catkin specific configuration ##
-###################################
-catkin_package(
-  INCLUDE_DIRS
-    include
-    ${EIGEN3_INCLUDE_DIR}
-  LIBRARIES
-    ${PROJECT_NAME}
-  CATKIN_DEPENDS
-    ${CATKIN_PACKAGE_DEPENDENCIES}
-)
-
-###########
-## Build ##
-###########
-add_library(${PROJECT_NAME}
-  src/GridMapDerivative.cpp
-  src/inpainting.cpp
-  src/lookup.cpp
-  src/smoothing.cpp
-  src/processing.cpp
-)
-
-target_include_directories(${PROJECT_NAME} PRIVATE
-  include
-)
-
-target_include_directories(${PROJECT_NAME} SYSTEM PUBLIC
-  ${catkin_INCLUDE_DIRS}
-  ${EIGEN3_INCLUDE_DIR}
-)
-
-target_link_libraries(${PROJECT_NAME}
-  ${catkin_LIBRARIES}
-)
-
-##########
-## Test ##
-##########
-
-## GTest.
-if(CATKIN_ENABLE_TESTING)
-  ## Find catkin dependencies, including test dependencies.
-  find_package(catkin REQUIRED
-    COMPONENTS
-      ${CATKIN_PACKAGE_DEPENDENCIES}
-  )
-
-  catkin_add_gtest(test_${PROJECT_NAME}
-    test/TestDerivativeFilter.cpp
-    test/TestFilters.cpp
-    test/TestLookup.cpp
-  )
-endif()
-
-## Link GTest.
-if(TARGET test_${PROJECT_NAME}) - target_link_libraries(test_${PROJECT_NAME} - ${PROJECT_NAME} - ${catkin_LIBRARIES} - gtest_main - ) - - target_include_directories(test_${PROJECT_NAME} PRIVATE - include - ) - - target_include_directories(test_${PROJECT_NAME} SYSTEM PUBLIC - ${catkin_INCLUDE_DIRS} - ) - - # Generate test coverage report - find_package(cmake_code_coverage QUIET) - if(cmake_code_coverage_FOUND) - add_gtest_coverage( - TEST_BUILD_TARGETS test_${PROJECT_NAME} - ) - endif(cmake_code_coverage_FOUND) -endif() - -############# -## Install ## -############# -install( - TARGETS - ${PROJECT_NAME} - ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} - LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} - RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} -) - -install( - DIRECTORY - include/${PROJECT_NAME}/ - DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} -) - -########### -## Clang ## -########### -find_package(cmake_clang_tools QUIET) -if(cmake_clang_tools_FOUND) - add_default_clang_tooling() -endif(cmake_clang_tools_FOUND) \ No newline at end of file diff --git a/grid_map_filters_rsl/include/grid_map_filters_rsl/smoothing.hpp b/grid_map_filters_rsl/include/grid_map_filters_rsl/smoothing.hpp deleted file mode 100644 index 9f0deebd..00000000 --- a/grid_map_filters_rsl/include/grid_map_filters_rsl/smoothing.hpp +++ /dev/null @@ -1,95 +0,0 @@ -/** - * @file smoothing.hpp - * @authors Fabian Jenelten - * @date 18.05, 2021 - * @affiliation ETH RSL - * @brief Smoothing and outlier rejection filters. - */ - -#pragma once - -// grid map. -#include - -namespace grid_map { -namespace smoothing { - -/** - * @brief Sequential median filter (open-cv function). In-place operation (layerIn = layerOut) is supported. - * @param map grid map - * @param layerIn reference layer (filter is applied wrt this layer) - * @param layerOut output layer (filtered map is written into this layer) - * @param kernelSize size of the smoothing window (must be an odd number) - * @param deltaKernelSize kernel size is increased by this value, if numberOfRepeats > 1 - * @param numberOfRepeats number of sequentially applied median filters (approaches to gaussian blurring if increased) - */ -void median(grid_map::GridMap& map, const std::string& layerIn, const std::string& layerOut, int kernelSize, int deltaKernelSize = 2, - int numberOfRepeats = 1); - -/** - * @brief Sequential box blur filter (open cv-function). In-place operation (layerIn = layerOut) is supported. - * @param map grid map - * @param layerIn reference layer (filter is applied wrt this layer) - * @param layerOut output layer (filtered map is written into this layer) - * @param kernelSize size of the smoothing window (should be an odd number, otherwise, introduces offset) - * @param numberOfRepeats number of sequentially applied blurring filters (approaches to gaussian blurring if increased) - */ -void boxBlur(grid_map::GridMap& map, const std::string& layerIn, const std::string& layerOut, int kernelSize, int numberOfRepeats = 1); - -/** - * @brief Gaussian blur filter (open cv-function). In-place operation (layerIn = layerOut) is supported. 
- * @param map grid map - * @param layerIn reference layer (filter is applied wrt this layer) - * @param layerOut output layer (filtered map is written into this layer) - * @param kernelSize size of the smoothing window (should be an odd number, otherwise, introduces offset) - * @param sigma variance - */ -void gaussianBlur(grid_map::GridMap& map, const std::string& layerIn, const std::string& layerOut, int kernelSize, double sigma); - -/** - * @brief Non-Local means denoising filter (open-cv function). In-place operation (layerIn = layerOut) is supported. Attention: slow (~5ms)! - * @param map grid map - * @param layerIn reference layer (filter is applied wrt this layer) - * @param layerOut output layer (filtered map is written into this layer) - * @param kernelSize size of the smoothing window (must be an odd number) - * @param searchWindow search window (must be an odd number and larger than kernelSize) - * @param w filter strength - */ -void nlm(grid_map::GridMap& map, const std::string& layerIn, const std::string& layerOut, int kernelSize = 3, int searchWindow = 7, - float w = 45.F); - -/** - * @brief Performs image denoising using the Block-Matching and 3D-filtering algorithm (open-cv function). In-place operation (layerIn = - * layerOut) is supported. Attention: very slow (~30ms)! - * @param map grid map - * @param layerIn reference layer (filter is applied wrt this layer) - * @param layerOut output layer (filtered map is written into this layer) - * @param kernelSize size of the smoothing window (must be power of 2) - * @param searchWindow search window (must be larger than kernelSize) - * @param w filter strength - */ -void bm3d(grid_map::GridMap& map, const std::string& layerIn, const std::string& layerOut, int kernelSize = 4, int searchWindow = 6, - float w = 25.F); - -/** - * @brief Bilateral filter (open-cv function). In-place operation (layerIn = layerOut) is supported. Attention: slow (~0.3ms)! - * @param map grid map - * @param layerIn reference layer (filter is applied wrt this layer) - * @param layerOut output layer (filtered map is written into this layer) - * @param kernelSize size of the smoothing window (must be an even number) - * @param w filter strength - */ -void bilateralFilter(grid_map::GridMap& map, const std::string& layerIn, const std::string& layerOut, int kernelSize = 0, double w = 0.2); - -/** - * @brief Optimization based (open-cv function). In-place operation (layerIn = layerOut) is supported. Attention: slow (~15ms)! 
- * @param map grid map - * @param layerIn reference layer (filter is applied wrt this layer) - * @param layerOut output layer (filtered map is written into this layer) - * @param lambda the smaller the value, the smoother the image - * @param n number of optimization iterations - */ -void tvL1(grid_map::GridMap& map, const std::string& layerIn, const std::string& layerOut, double lambda = 1.0, int n = 60); - -} // namespace smoothing -} // namespace grid_map diff --git a/grid_map_filters_rsl/package.xml b/grid_map_filters_rsl/package.xml deleted file mode 100644 index 0c00cdb8..00000000 --- a/grid_map_filters_rsl/package.xml +++ /dev/null @@ -1,18 +0,0 @@ - - - grid_map_filters_rsl - 0.1.0 - Extension of grid map filters package with op-cv filters and others - Fabian Jenelten - Proprietary - http://bitbucket.org/leggedrobotics/anymal_rsl_locomotion - Fabian Jenelten - - catkin - cmake_clang_tools - grid_map_cv - grid_map_core - - gtest - cmake_code_coverage - diff --git a/grid_map_filters_rsl/src/smoothing.cpp b/grid_map_filters_rsl/src/smoothing.cpp deleted file mode 100644 index 300ad54f..00000000 --- a/grid_map_filters_rsl/src/smoothing.cpp +++ /dev/null @@ -1,211 +0,0 @@ -/** - * @file smoothing.cpp - * @authors Fabian Jenelten - * @date 18.05, 2021 - * @affiliation ETH RSL - * @brief Smoothing and outlier rejection filters. - */ - -// grid map filters rsl. -#include - -// open cv. -#include -#include -#include -#include -#include -#include - -namespace grid_map { -namespace smoothing { - -void median(grid_map::GridMap& map, const std::string& layerIn, const std::string& layerOut, int kernelSize, int deltaKernelSize, - int numberOfRepeats) { - // Create new layer if missing. - if (!map.exists(layerOut)) { - map.add(layerOut); - } - - if (kernelSize + deltaKernelSize * (numberOfRepeats - 1) <= 5) { - // Convert to image. - cv::Mat elevationImage; - cv::eigen2cv(map.get(layerIn), elevationImage); - - for (auto iter = 0; iter < numberOfRepeats; ++iter) { - cv::medianBlur(elevationImage, elevationImage, kernelSize); - kernelSize += deltaKernelSize; - } - - // Set output layer. - cv::cv2eigen(elevationImage, map.get(layerOut)); - } - - // Larger kernel sizes require a specific format. - else { - // Reference to in map. - const grid_map::Matrix& H_in = map.get(layerIn); - - // Convert grid map to image. - cv::Mat elevationImage; - const float minValue = H_in.minCoeffOfFinites(); - const float maxValue = H_in.maxCoeffOfFinites(); - grid_map::GridMapCvConverter::toImage(map, layerIn, CV_8UC1, minValue, maxValue, elevationImage); - - for (auto iter = 0; iter < numberOfRepeats; ++iter) { - cv::medianBlur(elevationImage, elevationImage, kernelSize); - kernelSize += deltaKernelSize; - } - - // Get image as float. - cv::Mat elevationImageFloat; - constexpr float maxUCharValue = 255.F; - elevationImage.convertTo(elevationImageFloat, CV_32F, (maxValue - minValue) / maxUCharValue, minValue); - - // Convert back to grid map. - cv::cv2eigen(elevationImageFloat, map.get(layerOut)); - } -} - -void boxBlur(grid_map::GridMap& map, const std::string& layerIn, const std::string& layerOut, int kernelSize, int numberOfRepeats) { - // Create new layer if missing. - if (!map.exists(layerOut)) { - map.add(layerOut); - } - - // Convert to image. - cv::Mat elevationImage; - cv::eigen2cv(map.get(layerIn), elevationImage); - - // Box blur. 
- cv::Size kernelSize2D(kernelSize, kernelSize); - for (auto iter = 0; iter < numberOfRepeats; ++iter) { - cv::blur(elevationImage, elevationImage, kernelSize2D); - } - - // Set output layer. - cv::cv2eigen(elevationImage, map.get(layerOut)); -} - -void gaussianBlur(grid_map::GridMap& map, const std::string& layerIn, const std::string& layerOut, int kernelSize, double sigma) { - // Create new layer if missing. - if (!map.exists(layerOut)) { - map.add(layerOut); - } - - // Convert to image. - cv::Mat elevationImage; - cv::eigen2cv(map.get(layerIn), elevationImage); - - // Box blur. - cv::Size kernelSize2D(kernelSize, kernelSize); - cv::GaussianBlur(elevationImage, elevationImage, kernelSize2D, sigma); - - // Set output layer. - cv::cv2eigen(elevationImage, map.get(layerOut)); -} - -void nlm(grid_map::GridMap& map, const std::string& layerIn, const std::string& layerOut, int kernelSize, int searchWindow, float w) { - // Create new layer if missing. - if (!map.exists(layerOut)) { - map.add(layerOut); - } - - // Get input layer. - const grid_map::Matrix& H_in = map.get(layerIn); - - // Convert grid map to image. - cv::Mat elevationImageIn; - const float minValue = H_in.minCoeffOfFinites(); - const float maxValue = H_in.maxCoeffOfFinites(); - grid_map::GridMapCvConverter::toImage(map, layerIn, CV_8UC1, minValue, maxValue, elevationImageIn); - - // Filter. - cv::Mat elevationImageOut; - cv::fastNlMeansDenoising(elevationImageIn, elevationImageOut, w, kernelSize, searchWindow); - - // Get as float. - cv::Mat elevationImageFloat; - constexpr float maxUCharValue = 255.F; - elevationImageOut.convertTo(elevationImageFloat, CV_32F, (maxValue - minValue) / maxUCharValue, minValue); - - // Set output layer. - cv::cv2eigen(elevationImageFloat, map.get(layerOut)); -} - -void bm3d(grid_map::GridMap& map, const std::string& layerIn, const std::string& layerOut, int kernelSize, int searchWindow, float w) { - // Create new layer if missing. - if (!map.exists(layerOut)) { - map.add(layerOut); - } - - // Get input layer. - const grid_map::Matrix& H_in = map.get(layerIn); - - // Convert grid map to image. - cv::Mat elevationImageIn; - const float minValue = H_in.minCoeffOfFinites(); - const float maxValue = H_in.maxCoeffOfFinites(); - grid_map::GridMapCvConverter::toImage(map, layerIn, CV_8UC1, minValue, maxValue, elevationImageIn); - - // Filter. - cv::Mat elevationImageOut; - cv::xphoto::bm3dDenoising(elevationImageIn, elevationImageOut, w, kernelSize, searchWindow, 2500, 400, 8, 1, 0.0f, cv::NORM_L1, - cv::xphoto::BM3D_STEP1); - - // Get as float. - cv::Mat elevationImageFloat; - constexpr float maxUCharValue = 255.F; - elevationImageOut.convertTo(elevationImageFloat, CV_32F, (maxValue - minValue) / maxUCharValue, minValue); - - // Set output layer. - cv::cv2eigen(elevationImageFloat, map.get(layerOut)); -} - -void bilateralFilter(grid_map::GridMap& map, const std::string& layerIn, const std::string& layerOut, int kernelSize, double w) { - // Create new layer if missing. - if (!map.exists(layerOut)) { - map.add(layerOut); - } - - // Convert to image. - cv::Mat elevationImageIn; - cv::eigen2cv(map.get(layerIn), elevationImageIn); - - // Filter. - cv::Mat elevationImageOut; - cv::bilateralFilter(elevationImageIn, elevationImageOut, kernelSize, w, w); - - // Set output layer. - cv::cv2eigen(elevationImageOut, map.get(layerOut)); -} - -void tvL1(grid_map::GridMap& map, const std::string& layerIn, const std::string& layerOut, double lambda, int n) { - // Create new layer if missing. 
-  if (!map.exists(layerOut)) {
-    map.add(layerOut);
-  }
-
-  // Get input layer.
-  const grid_map::Matrix& H_in = map.get(layerIn);
-
-  // Convert grid map to image.
-  cv::Mat elevationImageIn;
-  const float minValue = H_in.minCoeffOfFinites();
-  const float maxValue = H_in.maxCoeffOfFinites();
-  grid_map::GridMapCvConverter::toImage(map, layerIn, CV_8UC1, minValue, maxValue, elevationImageIn);
-
-  // Filter.
-  cv::Mat elevationImageOut;
-  cv::denoise_TVL1({elevationImageIn}, elevationImageOut, lambda, n);
-
-  // Get as float.
-  cv::Mat elevationImageFloat;
-  constexpr float maxUCharValue = 255.F;
-  elevationImageOut.convertTo(elevationImageFloat, CV_32F, (maxValue - minValue) / maxUCharValue, minValue);
-
-  // Set output layer.
-  cv::cv2eigen(elevationImageFloat, map.get(layerOut));
-}
-}  // namespace smoothing
-}  // namespace grid_map
diff --git a/plane_segmentation/README.md b/plane_segmentation/README.md
new file mode 100644
index 00000000..d9c17ae7
--- /dev/null
+++ b/plane_segmentation/README.md
@@ -0,0 +1,45 @@
+# Plane Segmentation
+
+## Overview
+
+This is a C++ ROS package for extracting convex polygons from elevation maps.
+In a first step, the terrain is segmented into planes, and their non-convex contour is extracted.
+In a second step, a local convex approximation can be obtained.
+
+## Usage
+
+### Build
+
+```bash
+catkin build convex_plane_decomposition_ros
+```
+
+### Run as node
+
+```bash
+roslaunch convex_plane_decomposition_ros convex_plane_decomposition.launch
+```
+
+### Run demo
+
+```bash
+roslaunch convex_plane_decomposition_ros demo.launch
+```
+
+### Convex approximation demo
+
+A simple 2D demo of the convex inner approximation is available:
+
+```bash
+rosrun convex_plane_decomposition_ros convex_plane_decomposition_ros_TestShapeGrowing
+```
+
+### Parameters
+
+You can select input map topics, pipeline parameters, etc. in the respective yaml file in
+
+```bash
+convex_plane_decomposition_ros/config/
+```
+
+Some other parameters are set through the launch files.
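+
+### Using the pipeline as a library
+
+The decomposition can also be driven directly from C++, independent of ROS. The snippet below is an illustrative sketch rather than an
+example shipped with this package: it assumes the default-constructed parameters are usable as-is, and it only relies on the interfaces
+declared in `PlaneDecompositionPipeline.h` and `SegmentedPlaneProjection.h`.
+
+```cpp
+#include <convex_plane_decomposition/PlaneDecompositionPipeline.h>
+#include <convex_plane_decomposition/SegmentedPlaneProjection.h>
+
+#include <grid_map_core/GridMap.hpp>
+
+int main() {
+  using namespace convex_plane_decomposition;
+
+  // Default parameters for every pipeline stage; the ROS node loads these from yaml instead.
+  PlaneDecompositionPipeline::Config config;
+  PlaneDecompositionPipeline pipeline(config);
+
+  // Dummy input: a flat 2 m x 2 m elevation map at 4 cm resolution.
+  grid_map::GridMap map({"elevation"});
+  map.setGeometry(grid_map::Length(2.0, 2.0), 0.04);
+  map["elevation"].setZero();
+
+  // The map is consumed by the pipeline and returned as part of the planar terrain.
+  pipeline.update(std::move(map), "elevation");
+  const PlanarTerrain& terrain = pipeline.getPlanarTerrain();
+
+  // Project a query point to the "best" region: squared world-frame distance plus a
+  // user-supplied, non-negative penalty (here: zero).
+  const Eigen::Vector3d query(0.5, 0.5, 0.0);
+  const auto projection = getBestPlanarRegionAtPositionInWorld(
+      query, terrain.planarRegions, [](const Eigen::Vector3d&) { return 0.0; });
+  return projection.regionPtr != nullptr ? 0 : 1;
+}
+```
+
+The penalty hook is the natural place to inject task-specific preferences, e.g. discouraging regions far below the robot, as long as
+the returned value stays non-negative.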
diff --git a/plane_segmentation/cgal5_ament/.gitignore b/plane_segmentation/cgal5_ament/.gitignore new file mode 100644 index 00000000..259148fa --- /dev/null +++ b/plane_segmentation/cgal5_ament/.gitignore @@ -0,0 +1,32 @@ +# Prerequisites +*.d + +# Compiled Object files +*.slo +*.lo +*.o +*.obj + +# Precompiled Headers +*.gch +*.pch + +# Compiled Dynamic libraries +*.so +*.dylib +*.dll + +# Fortran module files +*.mod +*.smod + +# Compiled Static libraries +*.lai +*.la +*.a +*.lib + +# Executables +*.exe +*.out +*.app diff --git a/plane_segmentation/cgal5_ament/CMakeLists.txt b/plane_segmentation/cgal5_ament/CMakeLists.txt new file mode 100644 index 00000000..10107913 --- /dev/null +++ b/plane_segmentation/cgal5_ament/CMakeLists.txt @@ -0,0 +1,30 @@ +cmake_minimum_required(VERSION 3.14) +project(cgal5_ament) + +# Use ament for ROS2 +find_package(ament_cmake REQUIRED) + +include(ExternalProject) + +set(VERSION 5.3) + +file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/include) + +set(CGAL_VERSION 5.3) +ExternalProject_Add(cgal + URL https://github.com/CGAL/cgal/archive/refs/tags/v${CGAL_VERSION}.tar.gz + UPDATE_COMMAND "" + CMAKE_ARGS + -DCMAKE_INSTALL_PREFIX=${CMAKE_CURRENT_BINARY_DIR} + -DCMAKE_BUILD_TYPE:STRING=Release + BUILD_COMMAND $(MAKE) + INSTALL_COMMAND $(MAKE) install +) + +ament_export_include_directories(${CMAKE_CURRENT_BINARY_DIR}/include) + +install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/include/CGAL/ + DESTINATION include/CGAL/ +) + +ament_package() diff --git a/plane_segmentation/cgal5_ament/README.md b/plane_segmentation/cgal5_ament/README.md new file mode 100644 index 00000000..af601fd7 --- /dev/null +++ b/plane_segmentation/cgal5_ament/README.md @@ -0,0 +1,3 @@ +# cgal5_catkin + +Catkin wrapper of the Computational Geometry Algorithms Library (CGAL) diff --git a/plane_segmentation/cgal5_ament/cmake/cgal-extras.cmake.in b/plane_segmentation/cgal5_ament/cmake/cgal-extras.cmake.in new file mode 100644 index 00000000..3db7988c --- /dev/null +++ b/plane_segmentation/cgal5_ament/cmake/cgal-extras.cmake.in @@ -0,0 +1,6 @@ +# Makes CGAL/find___.cmake available +list(APPEND CMAKE_MODULE_PATH @CATKIN_DEVEL_PREFIX@/lib/cmake/CGAL) + +# Cpp standard version +set(CMAKE_CXX_STANDARD 14) +set(CMAKE_CXX_STANDARD_REQUIRED ON) \ No newline at end of file diff --git a/plane_segmentation/cgal5_ament/package.xml b/plane_segmentation/cgal5_ament/package.xml new file mode 100644 index 00000000..d280c209 --- /dev/null +++ b/plane_segmentation/cgal5_ament/package.xml @@ -0,0 +1,19 @@ + + + cgal5_ament + 0.0.1 + ROS2 package integrating CGAL + Lorenzo Terenzi + BSD-3-Clause + + + ament_cmake + + + boost + + + + ament_cmake + + \ No newline at end of file diff --git a/plane_segmentation/convex_plane_decomposition/CMakeLists.txt b/plane_segmentation/convex_plane_decomposition/CMakeLists.txt new file mode 100644 index 00000000..44d7d67a --- /dev/null +++ b/plane_segmentation/convex_plane_decomposition/CMakeLists.txt @@ -0,0 +1,114 @@ +cmake_minimum_required(VERSION 3.10) +project(convex_plane_decomposition) + +# Catkin dependencies +set(CATKIN_PACKAGE_DEPENDENCIES + cgal5_ament + grid_map_core + grid_map_cv + grid_map_filters_rsl +) + +find_package(catkin REQUIRED COMPONENTS + ${CATKIN_PACKAGE_DEPENDENCIES} +) + +# CGAL dependencies (QUIET because they cannot be found by Clion) +find_package(GMP 4.2 QUIET) +find_package(MPFR 2.2.1 QUIET) +find_package(Boost 1.57 QUIET) + +# OpenCv +find_package(OpenCV REQUIRED) + +# Eigen +find_package(Eigen3 3.3 REQUIRED NO_MODULE) + +# Cpp standard version 
+set(CMAKE_CXX_STANDARD 14) +set(CMAKE_CXX_STANDARD_REQUIRED ON) + +################################### +## catkin specific configuration ## +################################### +catkin_package( + INCLUDE_DIRS + include + ${EIGEN3_INCLUDE_DIRS} + LIBRARIES + ${PROJECT_NAME} + CATKIN_DEPENDS + ${CATKIN_PACKAGE_DEPENDENCIES} + DEPENDS + OpenCV GMP MPFR Boost +) + +########### +## Build ## +########### + +include_directories( + include + ${catkin_INCLUDE_DIRS} + ${EIGEN3_INCLUDE_DIR} +) + +add_library(${PROJECT_NAME} + src/contour_extraction/ContourExtraction.cpp + src/contour_extraction/Upsampling.cpp + src/ransac/RansacPlaneExtractor.cpp + src/sliding_window_plane_extraction/SlidingWindowPlaneExtractor.cpp + src/ConvexRegionGrowing.cpp + src/Draw.cpp + src/GridMapPreprocessing.cpp + src/LoadGridmapFromImage.cpp + src/PlanarRegion.cpp + src/PlaneDecompositionPipeline.cpp + src/Postprocessing.cpp + src/SegmentedPlaneProjection.cpp +) +add_dependencies(${PROJECT_NAME} + ${catkin_EXPORTED_TARGETS} +) +target_link_libraries(${PROJECT_NAME} + ${catkin_LIBRARIES} + ${OpenCV_LIBRARIES} + gmp + mpfr +) +target_compile_options(${PROJECT_NAME} + PUBLIC -DCGAL_HAS_THREADS +) + +############# +## Install ## +############# + +install(TARGETS ${PROJECT_NAME} + ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} + LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} + RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} +) +install(DIRECTORY include/${PROJECT_NAME}/ + DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} +) + +############# +## Testing ## +############# + +catkin_add_gtest(test_${PROJECT_NAME} + test/testConvexApproximation.cpp + test/testPipeline.cpp + test/testPlanarRegion.cpp + test/testRegionGrowing.cpp + test/testUpsampling.cpp +) +target_link_libraries(test_${PROJECT_NAME} + ${PROJECT_NAME} + ${catkin_LIBRARIES} + ${OpenCV_LIBRARIES} + gmp + mpfr + gtest_main +) diff --git a/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/ConvexRegionGrowing.h b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/ConvexRegionGrowing.h new file mode 100644 index 00000000..a760ecd7 --- /dev/null +++ b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/ConvexRegionGrowing.h @@ -0,0 +1,20 @@ +// +// Created by rgrandia on 09.06.20. +// + +#pragma once + +#include "PolygonTypes.h" + +namespace convex_plane_decomposition { + +CgalPolygon2d createRegularPolygon(const CgalPoint2d& center, double radius, int numberOfVertices); + +CgalPolygon2d growConvexPolygonInsideShape(const CgalPolygon2d& parentShape, CgalPoint2d center, int numberOfVertices, double growthFactor); + +CgalPolygon2d growConvexPolygonInsideShape(const CgalPolygonWithHoles2d& parentShape, CgalPoint2d center, int numberOfVertices, + double growthFactor); + +void updateMean(CgalPoint2d& mean, const CgalPoint2d& oldValue, const CgalPoint2d& updatedValue, int N); + +} // namespace convex_plane_decomposition diff --git a/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/Draw.h b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/Draw.h new file mode 100644 index 00000000..07de3c16 --- /dev/null +++ b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/Draw.h @@ -0,0 +1,27 @@ +// +// Created by rgrandia on 09.06.20. 
+//
+
+#pragma once
+
+#include <vector>
+
+#include <opencv2/core/core.hpp>
+#include <opencv2/highgui/highgui.hpp>
+#include "opencv2/imgproc/imgproc.hpp"
+
+#include "PolygonTypes.h"
+
+namespace convex_plane_decomposition {
+
+cv::Vec3b randomColor();
+
+std::vector<cv::Point> toCv(const CgalPolygon2d& polygon);
+
+void drawContour(cv::Mat& img, const CgalPoint2d& point, double radius = 1, const cv::Vec3b* color = nullptr);
+void drawContour(cv::Mat& img, const CgalPolygon2d& polygon, const cv::Vec3b* color = nullptr);
+void drawContour(cv::Mat& img, const CgalPolygonWithHoles2d& polygonWithHoles2d, const cv::Vec3b* color = nullptr);
+
+CgalPolygon2d scaleShape(const CgalPolygon2d& polygon, double scale);
+CgalPolygonWithHoles2d scaleShape(const CgalPolygonWithHoles2d& polygonWithHoles, double scale);
+
+}  // namespace convex_plane_decomposition
diff --git a/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/GeometryUtils.h b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/GeometryUtils.h
new file mode 100644
index 00000000..bd7c849d
--- /dev/null
+++ b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/GeometryUtils.h
@@ -0,0 +1,140 @@
+//
+// Created by rgrandia on 09.06.20.
+//
+
+#pragma once
+
+#include "PlanarRegion.h"
+#include "PolygonTypes.h"
+
+namespace convex_plane_decomposition {
+
+inline bool doEdgesIntersect(const CgalSegment2d& line, const CgalPolygon2d& contour) {
+  for (auto edgeIt = contour.edges_begin(); edgeIt != contour.edges_end(); ++edgeIt) {
+    if (CGAL::do_intersect(line, *edgeIt)) {
+      return true;
+    }
+  }
+  return false;
+}
+
+inline bool doEdgesIntersect(const CgalSegment2d& line, const CgalPolygonWithHoles2d& parentShape) {
+  if (doEdgesIntersect(line, parentShape.outer_boundary())) {
+    return true;
+  } else {
+    for (const auto& hole : parentShape.holes()) {
+      if (doEdgesIntersect(line, hole)) {
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+inline double squaredDistance(const CgalPoint2d& point, const CgalPolygon2d& polygon) {
+  double minDistSquared = std::numeric_limits<double>::max();
+  for (auto edgeIt = polygon.edges_begin(); edgeIt != polygon.edges_end(); ++edgeIt) {
+    double distSquare = CGAL::squared_distance(point, *edgeIt);
+    minDistSquared = std::min(distSquare, minDistSquared);
+  }
+  return minDistSquared;
+}
+
+inline double squaredDistance(const CgalPoint2d& point, const CgalPolygonWithHoles2d& parentShape) {
+  double minDistSquared = squaredDistance(point, parentShape.outer_boundary());
+  for (const auto& hole : parentShape.holes()) {
+    double distSquare = squaredDistance(point, hole);
+    minDistSquared = std::min(distSquare, minDistSquared);
+  }
+  return minDistSquared;
+}
+
+inline double squaredDistance(const CgalPoint2d& point, const CgalCircle2d& circle) {
+  auto dx = (point.x() - circle.center().x());
+  auto dy = (point.y() - circle.center().y());
+  return dx * dx + dy * dy;
+}
+
+template <typename T>
+double distance(const CgalPoint2d& point, const T& shape) {
+  double distanceSquared = squaredDistance(point, shape);
+  return (distanceSquared > 0.0) ? std::sqrt(distanceSquared) : 0.0;
+}
+
+inline bool isInside(const CgalPoint2d& point, const CgalCircle2d& circle) {
+  return squaredDistance(point, circle) <= circle.squared_radius();
+}
+
+inline bool isInside(const CgalPoint2d& point, const CgalPolygon2d& polygon) {
+  const auto boundedSide = CGAL::bounded_side_2(polygon.begin(), polygon.end(), point);
+  return boundedSide == CGAL::ON_BOUNDED_SIDE || boundedSide == CGAL::ON_BOUNDARY;
+}
+
+inline bool isInside(const CgalPoint2d& point, const CgalPolygonWithHoles2d& polygonWithHoles) {
+  if (isInside(point, polygonWithHoles.outer_boundary())) {
+    // Inside the outer contour -> return false if the point is inside any of the holes
+    for (const auto& hole : polygonWithHoles.holes()) {
+      const auto boundedSide = CGAL::bounded_side_2(hole.begin(), hole.end(), point);
+      if (boundedSide == CGAL::ON_BOUNDED_SIDE) {  // The edge of the hole is considered part of the polygon
+        return false;
+      }
+    }
+    return true;
+  } else {
+    return false;
+  }
+}
+
+inline CgalPoint2d getPointOnLine(const CgalPoint2d& start, const CgalPoint2d& end, double factor) {
+  return {factor * (end.x() - start.x()) + start.x(), factor * (end.y() - start.y()) + start.y()};
+}
+
+inline CgalPoint2d projectToClosestPoint(const CgalPoint2d& point, const CgalSegment2d& segment) {
+  // The segment as a vector, with the source being the origin
+  const Eigen::Vector2d sourceToTarget{segment.target().x() - segment.source().x(), segment.target().y() - segment.source().y()};
+  const double sourceToTargetDistance = sourceToTarget.norm();
+  const Eigen::Vector2d n = sourceToTarget / sourceToTargetDistance;
+
+  // Vector from source to the query point
+  const Eigen::Vector2d sourceToPoint{point.x() - segment.source().x(), point.y() - segment.source().y()};
+
+  // Projection to the line, clamped to be between source and target points
+  const double coeff = std::min(std::max(0.0, n.dot(sourceToPoint)), sourceToTargetDistance);
+
+  return {coeff * n.x() + segment.source().x(), coeff * n.y() + segment.source().y()};
+}
+
+inline CgalPoint2d projectToClosestPoint(const CgalPoint2d& point, const CgalPolygon2d& polygon) {
+  double minDistSquared = CGAL::squared_distance(point, *polygon.edges_begin());
+  auto closestEdge = polygon.edges_begin();
+  for (auto edgeIt = std::next(polygon.edges_begin()); edgeIt != polygon.edges_end(); ++edgeIt) {
+    double distSquare = CGAL::squared_distance(point, *edgeIt);
+    if (distSquare < minDistSquared) {
+      closestEdge = edgeIt;
+      minDistSquared = distSquare;
+    }
+  }
+  return projectToClosestPoint(point, *closestEdge);
+}
+
+inline void transformInPlace(CgalPolygon2d& polygon, const std::function<void(CgalPoint2d&)>& f) {
+  for (auto& point : polygon) {
+    f(point);
+  }
+}
+
+inline void transformInPlace(CgalPolygonWithHoles2d& polygonWithHoles, const std::function<void(CgalPoint2d&)>& f) {
+  transformInPlace(polygonWithHoles.outer_boundary(), f);
+  for (auto& hole : polygonWithHoles.holes()) {
+    transformInPlace(hole, f);
+  }
+}
+
+inline void transformInPlace(BoundaryWithInset& boundaryWithInset, const std::function<void(CgalPoint2d&)>& f) {
+  transformInPlace(boundaryWithInset.boundary, f);
+  for (auto& inset : boundaryWithInset.insets) {
+    transformInPlace(inset, f);
+  }
+}
+
+}  // namespace convex_plane_decomposition
diff --git a/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/GridMapPreprocessing.h b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/GridMapPreprocessing.h
new file mode 100644
index 00000000..ebd34b0f
--- /dev/null
+++ 
b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/GridMapPreprocessing.h @@ -0,0 +1,37 @@ +#pragma once + +#include + +#include + +namespace convex_plane_decomposition { + +struct PreprocessingParameters { + /// Resample to this resolution, set to negative values to skip + double resolution = 0.04; + /// Kernel size of the median filter, either 3 or 5 + int kernelSize = 3; + /// Number of times the image is filtered + int numberOfRepeats = 2; +}; + +class GridMapPreprocessing { + public: + GridMapPreprocessing(const PreprocessingParameters& parameters); + + void preprocess(grid_map::GridMap& gridMap, const std::string& layer) const; + + private: + void denoise(grid_map::GridMap& gridMap, const std::string& layer) const; + void changeResolution(grid_map::GridMap& gridMap, const std::string& layer) const; + void inpaint(grid_map::GridMap& gridMap, const std::string& layer) const; + + PreprocessingParameters parameters_; +}; + +/** + * @return true if any of the elements in the map are finite + */ +bool containsFiniteValue(const grid_map::Matrix& map); + +} // namespace convex_plane_decomposition diff --git a/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/LoadGridmapFromImage.h b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/LoadGridmapFromImage.h new file mode 100644 index 00000000..1b8f117a --- /dev/null +++ b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/LoadGridmapFromImage.h @@ -0,0 +1,26 @@ +// +// Created by rgrandia on 16.03.22. +// + +#pragma once + +#include + +#include + +namespace convex_plane_decomposition { + +/** + * Load an elevation map from a grey scale image. + * + * @param filePath : absolute path to file + * @param elevationLayer : name of the elevation layer + * @param frameId : frame assigned to loaded map + * @param resolution : map resolution [m/pixel] + * @param scale : distance [m] between lowest and highest point in the map + * @return Gridmap with the loaded image as elevation layer. + */ +grid_map::GridMap loadGridmapFromImage(const std::string& filePath, const std::string& elevationLayer, const std::string& frameId, + double resolution, double scale); + +} // namespace convex_plane_decomposition \ No newline at end of file diff --git a/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/PlanarRegion.h b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/PlanarRegion.h new file mode 100644 index 00000000..5e90ac00 --- /dev/null +++ b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/PlanarRegion.h @@ -0,0 +1,66 @@ +// +// Created by rgrandia on 10.06.20. +// + +#pragma once + +#include +#include + +#include + +#include "PolygonTypes.h" + +namespace convex_plane_decomposition { + +struct NormalAndPosition { + /// 3D position. + Eigen::Vector3d position; + + /// Surface normal. + Eigen::Vector3d normal; +}; + +struct BoundaryWithInset { + /// Boundary of the planar region. + CgalPolygonWithHoles2d boundary; + + /// Encodes an inward offset to the boundary. + std::vector insets; +}; + +struct PlanarRegion { + /// All 2d points are in the terrain frame + BoundaryWithInset boundaryWithInset; + + /// 2D bounding box in terrain frame containing all the boundary points + CgalBbox2d bbox2d; + + /// 3D Transformation from terrain to world. v_world = T * v_plane. 
Use .linear() for the rotation and .translation() for the translation
+  Eigen::Isometry3d transformPlaneToWorld;
+};
+
+struct PlanarTerrain {
+  std::vector<PlanarRegion> planarRegions;
+  grid_map::GridMap gridMap;
+};
+
+/**
+ * Convert a position and normal to a transform from the induced local frame to the global frame.
+ *
+ * For example, if the normal and position are defined in the world frame, we return a transform T such that v_world = T * v_plane.
+ * The normal will be taken as the z-direction of the local frame. The x and y directions are arbitrary.
+ */
+Eigen::Isometry3d getTransformLocalToGlobal(const NormalAndPosition& normalAndPosition);
+
+/**
+ * Project a 2D point in world along gravity to obtain a 2D point in the plane
+ */
+CgalPoint2d projectToPlaneAlongGravity(const CgalPoint2d& worldFrameXY, const Eigen::Isometry3d& transformPlaneToWorld);
+
+/**
+ * Transforms a point on the plane to a 3D position expressed in the world frame
+ */
+Eigen::Vector3d positionInWorldFrameFromPosition2dInPlane(const CgalPoint2d& planeXY, const Eigen::Isometry3d& transformPlaneToWorld);
+
+}  // namespace convex_plane_decomposition
diff --git a/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/PlaneDecompositionPipeline.h b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/PlaneDecompositionPipeline.h
new file mode 100644
index 00000000..78fc52e0
--- /dev/null
+++ b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/PlaneDecompositionPipeline.h
@@ -0,0 +1,77 @@
+//
+// Created by rgrandia on 16.03.22.
+//
+
+#pragma once
+
+#include "convex_plane_decomposition/GridMapPreprocessing.h"
+#include "convex_plane_decomposition/PlanarRegion.h"
+#include "convex_plane_decomposition/Postprocessing.h"
+#include "convex_plane_decomposition/Timer.h"
+#include "convex_plane_decomposition/contour_extraction/ContourExtraction.h"
+#include "convex_plane_decomposition/sliding_window_plane_extraction/SlidingWindowPlaneExtractor.h"
+
+namespace convex_plane_decomposition {
+
+/**
+ * Encloses the full plane decomposition pipeline:
+ *
+ * Input:
+ *  - Elevation map
+ * Output:
+ *  - Planar terrain (planes + filtered elevation map)
+ *  - Segmented elevation map
+ */
+class PlaneDecompositionPipeline {
+ public:
+  /** Collection of all parameters of steps in the pipeline */
+  struct Config {
+    PreprocessingParameters preprocessingParameters;
+    sliding_window_plane_extractor::SlidingWindowPlaneExtractorParameters slidingWindowPlaneExtractorParameters;
+    ransac_plane_extractor::RansacPlaneExtractorParameters ransacPlaneExtractorParameters;
+    contour_extraction::ContourExtractionParameters contourExtractionParameters;
+    PostprocessingParameters postprocessingParameters;
+  };
+
+  /**
+   * Constructor
+   * @param config : configuration containing all parameters of the pipeline
+   */
+  PlaneDecompositionPipeline(const Config& config);
+
+  /**
+   * Trigger update of the pipeline
+   * @param gridMap : gridmap to process. Will be modified and added to the resulting planar terrain.
+   * @param elevationLayer : Name of the elevation layer.
+   */
+  void update(grid_map::GridMap&& gridMap, const std::string& elevationLayer);
+
+  /// Access to the Pipeline result.
+  PlanarTerrain& getPlanarTerrain() { return planarTerrain_; }
+
+  /// Fills in the resulting segmentation into a gridmap layer.
+  void getSegmentation(grid_map::GridMap::Matrix& segmentation) const;
+
+  // Timers
+  const Timer& getPrepocessTimer() const { return preprocessTimer_; }
+  const Timer& getSlidingWindowTimer() const { return slidingWindowTimer_; }
+  const Timer& getContourExtractionTimer() const { return contourExtractionTimer_; }
+  const Timer& getPostprocessTimer() const { return postprocessTimer_; }
+
+ private:
+  PlanarTerrain planarTerrain_;
+
+  // Pipeline
+  GridMapPreprocessing preprocessing_;
+  sliding_window_plane_extractor::SlidingWindowPlaneExtractor slidingWindowPlaneExtractor_;
+  contour_extraction::ContourExtraction contourExtraction_;
+  Postprocessing postprocessing_;
+
+  // Timing
+  Timer preprocessTimer_;
+  Timer slidingWindowTimer_;
+  Timer contourExtractionTimer_;
+  Timer postprocessTimer_;
+};
+
+}  // namespace convex_plane_decomposition
diff --git a/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/PolygonTypes.h b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/PolygonTypes.h
new file mode 100644
index 00000000..40fe764f
--- /dev/null
+++ b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/PolygonTypes.h
@@ -0,0 +1,21 @@
+//
+// Created by rgrandia on 07.06.20.
+//
+
+#pragma once
+
+#include <CGAL/Exact_predicates_inexact_constructions_kernel.h>
+#include <CGAL/Polygon_2.h>
+#include <CGAL/Polygon_with_holes_2.h>
+
+namespace convex_plane_decomposition {
+
+using K = CGAL::Exact_predicates_inexact_constructions_kernel;
+using CgalPoint2d = K::Point_2;
+using CgalCircle2d = K::Circle_2;
+using CgalPolygon2d = CGAL::Polygon_2<K>;
+using CgalSegment2d = CgalPolygon2d::Segment_2;
+using CgalPolygonWithHoles2d = CGAL::Polygon_with_holes_2<K>;
+using CgalBbox2d = CGAL::Bbox_2;
+
+}  // namespace convex_plane_decomposition
diff --git a/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/Postprocessing.h b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/Postprocessing.h
new file mode 100644
index 00000000..12b85323
--- /dev/null
+++ b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/Postprocessing.h
@@ -0,0 +1,47 @@
+#pragma once
+
+#include "convex_plane_decomposition/PlanarRegion.h"
+
+namespace convex_plane_decomposition {
+
+struct PostprocessingParameters {
+  /// Offset in z direction added to compensate for the location of the foot frame w.r.t. the elevation map
+  double extracted_planes_height_offset = 0.0;
+
+  /// Offset in z direction added in cells that are not traversable
+  double nonplanar_height_offset = 0.02;
+
+  /// Size of the kernel creating the boundary offset. In number of pixels.
+  int nonplanar_horizontal_offset = 1;
+
+  /// Half the width of the dilation used before the smooth layer [m]
+  double smoothing_dilation_size = 0.2;
+
+  /// Half the width of the box kernel used for the smooth layer [m]
+  double smoothing_box_kernel_size = 0.1;
+
+  /// Half the width of the Gaussian kernel used for the smooth layer [m]
+  double smoothing_gauss_kernel_size = 0.05;
+};
+
+class Postprocessing {
+ public:
+  Postprocessing(const PostprocessingParameters& parameters);
+
+  /**
+   * @param planarTerrain
+   * @param elevationLayer : name of the elevation layer
+   * @param planeSegmentationLayer : name of the planarity layer with planar = 1.0, non-planar = 0.0
+   */
+  void postprocess(PlanarTerrain& planarTerrain, const std::string& elevationLayer, const std::string& planeSegmentationLayer) const;
+
+ private:
+  void dilationInNonplanarRegions(Eigen::MatrixXf& elevationData, const Eigen::MatrixXf& planarityMask) const;
+  void addHeightOffset(Eigen::MatrixXf& elevationData, const Eigen::MatrixXf& planarityMask) const;
+  void addHeightOffset(std::vector<PlanarRegion>& planarRegions) const;
+  void addSmoothLayer(grid_map::GridMap& gridMap, const Eigen::MatrixXf& elevationData, const Eigen::MatrixXf& planarityMask) const;
+
+  PostprocessingParameters parameters_;
+};
+
+}  // namespace convex_plane_decomposition
diff --git a/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/SegmentedPlaneProjection.h b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/SegmentedPlaneProjection.h
new file mode 100644
index 00000000..a48f2236
--- /dev/null
+++ b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/SegmentedPlaneProjection.h
@@ -0,0 +1,77 @@
+//
+// Created by rgrandia on 12.10.21.
+//
+
+#pragma once
+
+#include <functional>
+
+#include "convex_plane_decomposition/PlanarRegion.h"
+#include "convex_plane_decomposition/PolygonTypes.h"
+
+namespace convex_plane_decomposition {
+
+/**
+ * Projects a point in the plane to the closest point on the contour of a planar region. We take the inset (slight inward offset from the
+ * boundary) as the contour to project to.
+ * @param queryProjectedToPlane : 2D point in the frame of the planar regions
+ * @param planarRegion : planar region to project to
+ * @return projected 2D point in the frame of the planar regions
+ */
+CgalPoint2d projectToPlanarRegion(const CgalPoint2d& queryProjectedToPlane, const PlanarRegion& planarRegion);
+
+/**
+ * Sorting information used as an intermediate step to find the best plane projection.
+ */
+struct RegionSortingInfo {
+  const PlanarRegion* regionPtr{nullptr};
+  CgalPoint2d positionInTerrainFrame{0.0, 0.0};
+  double boundingBoxSquareDistance{0.0};
+};
+
+/**
+ * Compute sorting info and sort according to the bounding box distances.
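+ * The squared bounding-box distance (including the out-of-plane offset) is a cheap lower bound on the squared distance to the
+ * true projection, which lets callers prune far-away regions early.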
+ *
+ * @param positionInWorld : Query point in world frame
+ * @param planarRegions : Candidate planar regions
+ * @return RegionSortingInfo, sorted according to the bounding box distance
+ */
+std::vector<RegionSortingInfo> sortWithBoundingBoxes(const Eigen::Vector3d& positionInWorld,
+                                                     const std::vector<PlanarRegion>& planarRegions);
+
+struct PlanarTerrainProjection {
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+
+  /// Selected region
+  const PlanarRegion* regionPtr{nullptr};
+
+  /// Projected point in the terrain frame of the selected region
+  CgalPoint2d positionInTerrainFrame{0.0, 0.0};
+
+  /// Projected point in world frame
+  Eigen::Vector3d positionInWorld{0.0, 0.0, 0.0};
+
+  /// Projection cost, see getBestPlanarRegionAtPositionInWorld
+  double cost{0.0};
+};
+
+/**
+ * This function considers the projection of a 3D query point to a set of candidate regions.
+ *
+ * The "best" region is picked according to the following cost:
+ *    cost = |p - p_projected|^2 + penaltyFunction(p_projected),
+ * where p is the query point, and p_projected is the Euclidean projection to the candidate region, both in world frame.
+ *
+ * The bounding box of each region is used to find a lower bound on this cost; it is therefore important that the user-defined penalty is
+ * non-negative.
+ *
+ * @param positionInWorld : Query point in world frame
+ * @param planarRegions : Candidate planar regions
+ * @param penaltyFunction : a non-negative (!) scoring function.
+ * @return Projection and information
+ */
+PlanarTerrainProjection getBestPlanarRegionAtPositionInWorld(const Eigen::Vector3d& positionInWorld,
+                                                             const std::vector<PlanarRegion>& planarRegions,
+                                                             const std::function<double(const Eigen::Vector3d&)>& penaltyFunction);
+
+}  // namespace convex_plane_decomposition
\ No newline at end of file
diff --git a/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/SegmentedPlanesMap.h b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/SegmentedPlanesMap.h
new file mode 100644
index 00000000..f5576239
--- /dev/null
+++ b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/SegmentedPlanesMap.h
@@ -0,0 +1,32 @@
+//
+// Created by rgrandia on 10.06.20.
+//
+
+#pragma once
+
+#include <Eigen/Core>
+#include <opencv2/core.hpp>
+
+#include "PlanarRegion.h"
+
+namespace convex_plane_decomposition {
+
+struct SegmentedPlanesMap {
+  /// Unordered collection of all labels and the corresponding plane parameters
+  std::vector<std::pair<int, NormalAndPosition>> labelPlaneParameters;
+
+  /// Image with each pixel assigned an integer value corresponding to its label. Might contain labels that are not in
+  /// labelPlaneParameters. Those labels should be seen as background.
+  cv::Mat labeledImage;
+
+  /// Size of each pixel [m]
+  double resolution;
+
+  /// World X-Y position [m] of the (0, 0) pixel in the image.
+  Eigen::Vector2d mapOrigin;
+
+  /// All label values are smaller than or equal to highestLabel
+  int highestLabel;
+};
+
+}  // namespace convex_plane_decomposition
diff --git a/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/Timer.h b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/Timer.h
new file mode 100644
index 00000000..3eaa93e6
--- /dev/null
+++ b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/Timer.h
@@ -0,0 +1,82 @@
+//
+// Created by rgrandia on 15.03.22.
+//
+
+#pragma once
+
+#include <algorithm>
+#include <chrono>
+
+namespace convex_plane_decomposition {
+
+/**
+ * Timer class that can be repeatedly started and stopped. Statistics are collected for all measured intervals.
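+ *
+ * A minimal usage sketch (illustrative):
+ *   Timer timer;
+ *   timer.startTimer();
+ *   // ... timed work ...
+ *   timer.endTimer();
+ *   const double averageMs = timer.getAverageInMilliseconds();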
+ *
+ * Taken and adapted from ocs2_core
+ */
+class Timer {
+ public:
+  Timer()
+      : numTimedIntervals_(0),
+        totalTime_(std::chrono::nanoseconds::zero()),
+        maxIntervalTime_(std::chrono::nanoseconds::zero()),
+        lastIntervalTime_(std::chrono::nanoseconds::zero()),
+        startTime_(std::chrono::steady_clock::now()) {}
+
+  /**
+   * Reset the timer statistics
+   */
+  void reset() {
+    totalTime_ = std::chrono::nanoseconds::zero();
+    maxIntervalTime_ = std::chrono::nanoseconds::zero();
+    lastIntervalTime_ = std::chrono::nanoseconds::zero();
+    numTimedIntervals_ = 0;
+  }
+
+  /**
+   * Start timing an interval
+   */
+  void startTimer() { startTime_ = std::chrono::steady_clock::now(); }
+
+  /**
+   * Stop timing of an interval
+   */
+  void endTimer() {
+    auto endTime = std::chrono::steady_clock::now();
+    lastIntervalTime_ = std::chrono::duration_cast<std::chrono::nanoseconds>(endTime - startTime_);
+    maxIntervalTime_ = std::max(maxIntervalTime_, lastIntervalTime_);
+    totalTime_ += lastIntervalTime_;
+    numTimedIntervals_++;
+  };
+
+  /**
+   * @return Number of intervals that were timed
+   */
+  int getNumTimedIntervals() const { return numTimedIntervals_; }
+
+  /**
+   * @return Total cumulative time of timed intervals
+   */
+  double getTotalInMilliseconds() const { return std::chrono::duration<double, std::milli>(totalTime_).count(); }
+
+  /**
+   * @return Maximum duration of a single interval
+   */
+  double getMaxIntervalInMilliseconds() const { return std::chrono::duration<double, std::milli>(maxIntervalTime_).count(); }
+
+  /**
+   * @return Duration of the last timed interval
+   */
+  double getLastIntervalInMilliseconds() const { return std::chrono::duration<double, std::milli>(lastIntervalTime_).count(); }
+
+  /**
+   * @return Average duration of all timed intervals
+   */
+  double getAverageInMilliseconds() const { return getTotalInMilliseconds() / numTimedIntervals_; }
+
+ private:
+  int numTimedIntervals_;
+  std::chrono::nanoseconds totalTime_;
+  std::chrono::nanoseconds maxIntervalTime_;
+  std::chrono::nanoseconds lastIntervalTime_;
+  std::chrono::steady_clock::time_point startTime_;
+};
+
+}  // namespace convex_plane_decomposition
\ No newline at end of file
diff --git a/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/contour_extraction/ContourExtraction.h b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/contour_extraction/ContourExtraction.h
new file mode 100644
index 00000000..1c0cbe90
--- /dev/null
+++ b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/contour_extraction/ContourExtraction.h
@@ -0,0 +1,47 @@
+//
+// Created by rgrandia on 04.06.20.
+//
+
+#pragma once
+
+#include <opencv2/core.hpp>
+
+#include "convex_plane_decomposition/PlanarRegion.h"
+#include "convex_plane_decomposition/PolygonTypes.h"
+#include "convex_plane_decomposition/SegmentedPlanesMap.h"
+
+#include "ContourExtractionParameters.h"
+
+namespace convex_plane_decomposition {
+namespace contour_extraction {
+
+/**
+ * Extracts the contours in map resolution, but with the x and y axes flipped.
+ * This way all contours are in counter-clockwise direction.
+ */
+class ContourExtraction {
+ public:
+  ContourExtraction(const ContourExtractionParameters& parameters);
+
+  std::vector<PlanarRegion> extractPlanarRegions(const SegmentedPlanesMap& segmentedPlanesMap);
+
+ private:
+  ContourExtractionParameters parameters_;
+  cv::Mat insetKernel_;
+  cv::Mat marginKernel_;
+
+  // Memory to reuse between calls
+  cv::Mat binaryImage_;
+};
+
+/// Modifies the image in-place!
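+/// The boundaries are extracted from the input image first; the image is then eroded with erosionKernel and the insets are
+/// extracted from the eroded result (see the implementation in ContourExtraction.cpp).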
+std::vector<BoundaryWithInset> extractBoundaryAndInset(cv::Mat& binary_image, const cv::Mat& erosionKernel);
+
+std::vector<CgalPolygonWithHoles2d> extractPolygonsFromBinaryImage(const cv::Mat& binary_image);
+
+CgalPolygon2d cgalPolygonFromOpenCv(const std::vector<cv::Point>& openCvPolygon);
+
+CgalPoint2d pixelToWorldFrame(const CgalPoint2d& pixelspaceCgalPoint2d, double resolution, const Eigen::Vector2d& mapOffset);
+
+}  // namespace contour_extraction
+}  // namespace convex_plane_decomposition
diff --git a/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/contour_extraction/ContourExtractionParameters.h b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/contour_extraction/ContourExtractionParameters.h
new file mode 100644
index 00000000..009bc37e
--- /dev/null
+++ b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/contour_extraction/ContourExtractionParameters.h
@@ -0,0 +1,16 @@
+//
+// Created by rgrandia on 07.06.20.
+//
+
+#pragma once
+
+namespace convex_plane_decomposition {
+namespace contour_extraction {
+
+struct ContourExtractionParameters {
+  /// Size of the kernel creating the boundary offset. In number of (sub)pixels.
+  int marginSize = 1;
+};
+
+}  // namespace contour_extraction
+}  // namespace convex_plane_decomposition
diff --git a/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/contour_extraction/Upsampling.h b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/contour_extraction/Upsampling.h
new file mode 100644
index 00000000..0f80223b
--- /dev/null
+++ b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/contour_extraction/Upsampling.h
@@ -0,0 +1,31 @@
+//
+// Created by rgrandia on 26.10.21.
+//
+
+#pragma once
+
+#include "convex_plane_decomposition/SegmentedPlanesMap.h"
+
+namespace convex_plane_decomposition {
+namespace contour_extraction {
+
+/**
+ * Upsamples an image such that all pixels are turned into 9 pixels, with the original pixel in the middle.
+ * Around the edges, each pixel is only upsampled in the inward direction.
+ *
+ * @param image : source image
+ * @return upsampled image
+ */
+cv::Mat upSample(const cv::Mat& image);
+
+/**
+ * Upsamples a segmented terrain such that the resulting terrain has 1/3 of the input resolution. (Each cell is split into 9 cells.)
+ * This specific upsampling ratio makes it possible to keep labels in their exact original location in world frame.
+ *
+ * @param mapIn : source terrain
+ * @return upsampled terrain
+ */
+SegmentedPlanesMap upSample(const SegmentedPlanesMap& mapIn);
+
+}  // namespace contour_extraction
+}  // namespace convex_plane_decomposition
\ No newline at end of file
diff --git a/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/ransac/RansacPlaneExtractor.hpp b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/ransac/RansacPlaneExtractor.hpp
new file mode 100644
index 00000000..ca76b89d
--- /dev/null
+++ b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/ransac/RansacPlaneExtractor.hpp
@@ -0,0 +1,53 @@
+#pragma once
+
+#include <utility>
+#include <vector>
+
+#include <Eigen/Core>
+
+#include <CGAL/Exact_predicates_inexact_constructions_kernel.h>
+#include <CGAL/Shape_detection/Efficient_RANSAC.h>
+#include <CGAL/property_map.h>
+
+#include "RansacPlaneExtractorParameters.h"
+
+namespace ransac_plane_extractor {
+
+// Point with normal related type declarations.
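+// These aliases follow CGAL's Efficient_RANSAC usage pattern: each input point is stored as a {point, normal} pair, and the
+// property maps below tell CGAL how to access the point and normal halves of that pair.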
+using Kernel = CGAL::Exact_predicates_inexact_constructions_kernel;
+using Point3D = Kernel::Point_3;
+using Vector3D = Kernel::Vector_3;
+using PointWithNormal = std::pair<Point3D, Vector3D>;
+using PwnVector = std::vector<PointWithNormal>;
+
+// RANSAC plane extractor related type declarations.
+using PointMap = CGAL::First_of_pair_property_map<PointWithNormal>;
+using NormalMap = CGAL::Second_of_pair_property_map<PointWithNormal>;
+using Traits = CGAL::Shape_detection::Efficient_RANSAC_traits<Kernel, PwnVector, PointMap, NormalMap>;
+using EfficientRansac = CGAL::Shape_detection::Efficient_RANSAC<Traits>;
+using Plane = CGAL::Shape_detection::Plane<Traits>;
+using Shape = CGAL::Shape_detection::Shape_base<Traits>;
+
+class RansacPlaneExtractor {
+ public:
+  RansacPlaneExtractor(const RansacPlaneExtractorParameters& parameters);
+
+  void setParameters(const RansacPlaneExtractorParameters& parameters);
+
+  void detectPlanes(std::vector<PointWithNormal>& points_with_normal);
+
+  /// Return {plane normal, support vector} for the detected shape
+  static std::pair<Eigen::Vector3d, Eigen::Vector3d> getPlaneParameters(Shape* shapePtr);
+
+  /// Returns an iterator range. The data remains owned by the internal RANSAC object.
+  EfficientRansac::Shape_range getDetectedPlanes() const { return ransac_.shapes(); };
+
+  /// Returns an iterator range. The data remains owned by the internal RANSAC object.
+  EfficientRansac::Point_index_range getUnassignedPointIndices() { return ransac_.indices_of_unassigned_points(); }
+
+ private:
+  EfficientRansac ransac_;
+  EfficientRansac::Parameters cgalRansacParameters_;
+};
+
+}  // namespace ransac_plane_extractor
diff --git a/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/ransac/RansacPlaneExtractorParameters.h b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/ransac/RansacPlaneExtractorParameters.h
new file mode 100644
index 00000000..ef94c557
--- /dev/null
+++ b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/ransac/RansacPlaneExtractorParameters.h
@@ -0,0 +1,23 @@
+//
+// Created by rgrandia on 07.06.20.
+//
+
+#pragma once
+
+namespace ransac_plane_extractor {
+
+struct RansacPlaneExtractorParameters {
+  /// Set probability to miss the largest primitive at each iteration.
+  double probability = 0.01;
+  /// Detect shapes with at least N points.
+  double min_points = 4;
+  /// [m] Set maximum Euclidean distance between a point and a shape.
+  double epsilon = 0.025;
+  /// Set maximum Euclidean distance between points to be clustered. Two points are connected if separated by a distance of at most
+  /// 2*sqrt(2)*cluster_epsilon = 2.828 * cluster_epsilon
+  double cluster_epsilon = 0.08;
+  /// [deg] Set maximum normal deviation between cluster surface_normal and point normal.
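+  /// Converted internally to a dot-product bound, i.e. cos(25.0 deg) ~= 0.906 (see RansacPlaneExtractor::setParameters).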
+ double normal_threshold = 25.0; +}; + +} // namespace ransac_plane_extractor diff --git a/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/sliding_window_plane_extraction/SlidingWindowPlaneExtractor.h b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/sliding_window_plane_extraction/SlidingWindowPlaneExtractor.h new file mode 100644 index 00000000..f60398ad --- /dev/null +++ b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/sliding_window_plane_extraction/SlidingWindowPlaneExtractor.h @@ -0,0 +1,65 @@ +#pragma once + +#include + +#include +#include + +#include "convex_plane_decomposition/SegmentedPlanesMap.h" +#include "convex_plane_decomposition/ransac/RansacPlaneExtractor.hpp" + +#include "SlidingWindowPlaneExtractorParameters.h" + +namespace convex_plane_decomposition { +namespace sliding_window_plane_extractor { + +class SlidingWindowPlaneExtractor { + public: + SlidingWindowPlaneExtractor(const SlidingWindowPlaneExtractorParameters& parameters, + const ransac_plane_extractor::RansacPlaneExtractorParameters& ransacParameters); + + void runExtraction(const grid_map::GridMap& map, const std::string& layer_height); + + const SegmentedPlanesMap& getSegmentedPlanesMap() const { return segmentedPlanesMap_; } + + const cv::Mat& getBinaryLabeledImage() const { return binaryImagePatch_; } + + /** Can be run after extraction for debugging purpose */ + void addSurfaceNormalToMap(grid_map::GridMap& map, const std::string& layerPrefix) const; + + private: + bool isGloballyPlanar(const Eigen::Vector3d& normalVectorPlane, const Eigen::Vector3d& supportVectorPlane, + const std::vector& pointsWithNormal) const; + bool isWithinInclinationLimit(const Eigen::Vector3d& normalVectorPlane) const; + + std::pair computeNormalAndErrorForWindow(const Eigen::MatrixXf& windowData) const; + bool isLocallyPlanar(const Eigen::Vector3d& localNormal, double meanSquaredError) const; + + int getLinearIndex(int row, int col) const { return row + col * mapRows_; }; + + void computePlaneParametersForLabel(int label, std::vector& pointsWithNormal); + void refineLabelWithRansac(int label, std::vector& pointsWithNormal); + + void extractPlaneParametersFromLabeledImage(); + + void runSegmentation(); + + void runSlidingWindowDetector(); + + void setToBackground(int label); + + SlidingWindowPlaneExtractorParameters parameters_; + ransac_plane_extractor::RansacPlaneExtractorParameters ransacParameters_; + + const grid_map::GridMap* map_; + std::string elevationLayer_; + int mapRows_; + + std::vector surfaceNormals_; + std::vector pointsWithNormal_; + + cv::Mat binaryImagePatch_; + SegmentedPlanesMap segmentedPlanesMap_; +}; +} // namespace sliding_window_plane_extractor +} // namespace convex_plane_decomposition diff --git a/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/sliding_window_plane_extraction/SlidingWindowPlaneExtractorParameters.h b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/sliding_window_plane_extraction/SlidingWindowPlaneExtractorParameters.h new file mode 100644 index 00000000..51009a4f --- /dev/null +++ b/plane_segmentation/convex_plane_decomposition/include/convex_plane_decomposition/sliding_window_plane_extraction/SlidingWindowPlaneExtractorParameters.h @@ -0,0 +1,44 @@ +// +// Created by rgrandia on 07.06.20. 
+//
+
+#pragma once
+
+#include <cmath>
+
+namespace convex_plane_decomposition {
+namespace sliding_window_plane_extractor {
+
+struct SlidingWindowPlaneExtractorParameters {
+  /// Size of the sliding window patch used for normal vector calculation and planarity detection.
+  /// Should be an odd number and at least 3.
+  int kernel_size = 3;
+
+  /// [#] Apply an opening filter (erosion -> dilation) to the planarity detection by this amount of pixels
+  int planarity_opening_filter = 0;
+
+  /// [-] Maximum allowed angle between the surface normal and the world-z direction for a patch (converted to a dot-product bound)
+  double plane_inclination_threshold = std::cos(30.0 * M_PI / 180.0);
+
+  /// [-] Maximum allowed angle between the surface normal and the world-z direction for a cell (converted to a dot-product bound)
+  double local_plane_inclination_threshold = std::cos(35.0 * M_PI / 180.0);
+
+  /// [m] The allowed root-mean-squared deviation from the plane fitted to the patch. Higher -> not planar
+  double plane_patch_error_threshold = 0.02;
+
+  /// [#] Labels with fewer points assigned to them are discarded
+  int min_number_points_per_label = 4;
+
+  /// Label kernel connectivity. 4 or 8 (cross or box)
+  int connectivity = 4;
+
+  /// Enable RANSAC refinement if the plane does not globally fit the assigned points.
+  bool include_ransac_refinement = true;
+
+  /// [m] Allowed maximum distance from a point to the plane. If any point violates this, RANSAC is triggered
+  double global_plane_fit_distance_error_threshold = 0.025;
+
+  /// [deg] Allowed normal vector deviation for a point w.r.t. the plane normal. If any point violates this, RANSAC is triggered
+  double global_plane_fit_angle_error_threshold_degrees = 25.0;
+};
+
+}  // namespace sliding_window_plane_extractor
+}  // namespace convex_plane_decomposition
diff --git a/plane_segmentation/convex_plane_decomposition/package.xml b/plane_segmentation/convex_plane_decomposition/package.xml
new file mode 100644
index 00000000..85988116
--- /dev/null
+++ b/plane_segmentation/convex_plane_decomposition/package.xml
@@ -0,0 +1,17 @@
+<?xml version="1.0"?>
+<package format="2">
+  <name>convex_plane_decomposition</name>
+  <version>0.0.0</version>
+  <description>The convex_plane_decomposition package</description>
+
+  <maintainer>Ruben Grandia</maintainer>
+
+  <license>MIT</license>
+
+  <buildtool_depend>catkin</buildtool_depend>
+  <depend>cgal5_ament</depend>
+  <depend>grid_map_core</depend>
+  <depend>grid_map_cv</depend>
+  <depend>grid_map_filters_rsl</depend>
+
+</package>
diff --git a/plane_segmentation/convex_plane_decomposition/src/ConvexRegionGrowing.cpp b/plane_segmentation/convex_plane_decomposition/src/ConvexRegionGrowing.cpp
new file mode 100644
index 00000000..052a0c97
--- /dev/null
+++ b/plane_segmentation/convex_plane_decomposition/src/ConvexRegionGrowing.cpp
@@ -0,0 +1,229 @@
+//
+// Created by rgrandia on 09.06.20.
+//
+
+#include "convex_plane_decomposition/ConvexRegionGrowing.h"
+
+#include "convex_plane_decomposition/GeometryUtils.h"
+
+namespace convex_plane_decomposition {
+
+namespace {
+
+int getCounterClockWiseNeighbour(int i, int lastVertex) {
+  return (i > 0) ? i - 1 : lastVertex;
+}
+
+int getClockWiseNeighbour(int i, int lastVertex) {
+  return (i < lastVertex) ? i + 1 : 0;
+}
+
+std::array<CgalPoint2d, 2> getNeighbours(const CgalPolygon2d& polygon, int i) {
+  assert(i < polygon.size());
+  assert(polygon.size() > 1);
+  int lastVertex = static_cast<int>(polygon.size()) - 1;
+  int cwNeighbour = getClockWiseNeighbour(i, lastVertex);
+  int ccwNeighbour = getCounterClockWiseNeighbour(i, lastVertex);
+  return {polygon.vertex(cwNeighbour), polygon.vertex(ccwNeighbour)};
+}
+
+std::array<CgalPoint2d, 4> get2ndNeighbours(const CgalPolygon2d& polygon, int i) {
+  assert(i < polygon.size());
+  assert(polygon.size() > 1);
+  int lastVertex = static_cast<int>(polygon.size()) - 1;
+  int cwNeighbour1 = getClockWiseNeighbour(i, lastVertex);
+  int cwNeighbour2 = getClockWiseNeighbour(cwNeighbour1, lastVertex);
+  int ccwNeighbour1 = getCounterClockWiseNeighbour(i, lastVertex);
+  int ccwNeighbour2 = getCounterClockWiseNeighbour(ccwNeighbour1, lastVertex);
+  return {polygon.vertex(cwNeighbour2), polygon.vertex(cwNeighbour1), polygon.vertex(ccwNeighbour1), polygon.vertex(ccwNeighbour2)};
+}
+
+bool remainsConvexWhenMovingPoint(const CgalPolygon2d& polygon, int i, const CgalPoint2d& point) {
+  auto secondNeighbours = get2ndNeighbours(polygon, i);
+  using CgalPolygon2dFixedSize = CGAL::Polygon_2<K, std::array<CgalPoint2d, 5>>;
+
+  CgalPolygon2dFixedSize subPolygon;
+  subPolygon.container()[0] = secondNeighbours[0];
+  subPolygon.container()[1] = secondNeighbours[1];
+  subPolygon.container()[2] = point;
+  subPolygon.container()[3] = secondNeighbours[2];
+  subPolygon.container()[4] = secondNeighbours[3];
+  return subPolygon.is_convex();
+}
+
+bool pointAndNeighboursAreWithinFreeSphere(const std::array<CgalPoint2d, 2>& neighbours, const CgalPoint2d& point,
+                                           const CgalCircle2d& circle) {
+  return isInside(neighbours[0], circle) && isInside(point, circle) && isInside(neighbours[1], circle);
+}
+
+/**
+ * Returns {true, 0.0} if either one of the point -> neighbour segments intersects the parent shape.
+ * Returns {false, minSquareDistance} if none of the segments intersects.
+ * minSquareDistance is the minimum square distance between the point and the parent shape.
+ */
+template <typename T>
+std::pair<bool, double> doEdgesIntersectAndSquareDistance(const std::array<CgalPoint2d, 2>& neighbours, const CgalPoint2d& point,
+                                                          const T& parentShape);
+
+template <>
+std::pair<bool, double> doEdgesIntersectAndSquareDistance(const std::array<CgalPoint2d, 2>& neighbours, const CgalPoint2d& point,
+                                                          const CgalPolygon2d& parentShape) {
+  CgalSegment2d segment0{neighbours[0], point};
+  CgalSegment2d segment1{neighbours[1], point};
+
+  double minDistSquared = std::numeric_limits<double>::max();
+  for (auto edgeIt = parentShape.edges_begin(); edgeIt != parentShape.edges_end(); ++edgeIt) {
+    const auto edge = *edgeIt;
+    if (CGAL::do_intersect(segment0, edge) || CGAL::do_intersect(segment1, edge)) {
+      return {true, 0.0};
+    } else {
+      minDistSquared = std::min(minDistSquared, CGAL::squared_distance(point, edge));
+    }
+  }
+
+  return {false, minDistSquared};
+}
+
+template <>
+std::pair<bool, double> doEdgesIntersectAndSquareDistance(const std::array<CgalPoint2d, 2>& neighbours, const CgalPoint2d& point,
+                                                          const CgalPolygonWithHoles2d& parentShape) {
+  const auto intersectAndDistance = doEdgesIntersectAndSquareDistance(neighbours, point, parentShape.outer_boundary());
+  if (intersectAndDistance.first) {
+    return {true, 0.0};
+  }
+
+  double minDistSquared = intersectAndDistance.second;
+  for (const auto& hole : parentShape.holes()) {
+    const auto holeIntersectAndDistance = doEdgesIntersectAndSquareDistance(neighbours, point, hole);
+    if (holeIntersectAndDistance.first) {
+      return {true, 0.0};
+    } else {
+      minDistSquared = std::min(minDistSquared, holeIntersectAndDistance.second);
+    }
+  }
+
+  return {false, minDistSquared};
+}
+
+template <typename T>
+bool pointCanBeMoved(const CgalPolygon2d& growthShape, int i, const CgalPoint2d& candidatePoint, CgalCircle2d& freeSphere,
+                     const T& parentShape) {
+  if (remainsConvexWhenMovingPoint(growthShape, i, candidatePoint)) {
+    auto neighbours = getNeighbours(growthShape, i);
+    if (pointAndNeighboursAreWithinFreeSphere(neighbours, candidatePoint, freeSphere)) {
+      return true;
+    } else {
+      // Look for intersections and minimum distances simultaneously
+      const auto intersectAndDistance = doEdgesIntersectAndSquareDistance(neighbours, candidatePoint, parentShape);
+
+      if (intersectAndDistance.first) {
+        return false;
+      } else {
+        // Update free sphere around new point
+        freeSphere = CgalCircle2d(candidatePoint, intersectAndDistance.second);
+        return true;
+      }
+    }
+  } else {
+    return false;
+  }
+}
+
+inline std::ostream& operator<<(std::ostream& os, const CgalPolygon2d& p) {
+  os << "CgalPolygon2d: \n";
+  for (auto it = p.vertices_begin(); it != p.vertices_end(); ++it) {
+    os << "\t(" << it->x() << ", " << it->y() << ")\n";
+  }
+  return os;
+}
+
+inline std::ostream& operator<<(std::ostream& os, const CgalPolygonWithHoles2d& p) {
+  os << "CgalPolygonWithHoles2d: \n";
+  os << "\t" << p.outer_boundary() << "\n";
+  os << "\tHoles: \n";
+  for (auto it = p.holes_begin(); it != p.holes_end(); ++it) {
+    os << "\t\t" << *it << "\n";
+  }
+  return os;
+}
+
+template <typename T>
+CgalPolygon2d growConvexPolygonInsideShape_impl(const T& parentShape, CgalPoint2d center, int numberOfVertices, double growthFactor) {
+  const auto centerCopy = center;
+  constexpr double initialRadiusFactor = 0.999;
+  constexpr int maxIter = 1000;
+  double radius = initialRadiusFactor * distance(center, parentShape);
+
+  CgalPolygon2d growthShape = createRegularPolygon(center, radius, numberOfVertices);
+
+  if (radius == 0.0) {
+    std::cerr << "[growConvexPolygonInsideShape] Zero initial radius.
Provide a point with a non-zero offset to the boundary.\n"; + return growthShape; + } + + // Cached values per vertex + std::vector blocked(numberOfVertices, false); + std::vector freeSpheres(numberOfVertices, CgalCircle2d(center, radius * radius)); + + int Nblocked = 0; + int iter = 0; + while (Nblocked < numberOfVertices && iter < maxIter) { + for (int i = 0; i < numberOfVertices; i++) { + if (!blocked[i]) { + const auto candidatePoint = getPointOnLine(center, growthShape.vertex(i), growthFactor); + if (pointCanBeMoved(growthShape, i, candidatePoint, freeSpheres[i], parentShape)) { + updateMean(center, growthShape.vertex(i), candidatePoint, numberOfVertices); + growthShape.vertex(i) = candidatePoint; + } else { + blocked[i] = true; + ++Nblocked; + } + } + } + ++iter; + } + + if (iter == maxIter) { + std::cerr << "[growConvexPolygonInsideShape] max iteration in region growing! Debug information: \n"; + std::cerr << "numberOfVertices: " << numberOfVertices << "\n"; + std::cerr << "growthFactor: " << growthFactor << "\n"; + std::cerr << "Center: " << centerCopy.x() << ", " << centerCopy.y() << "\n"; + std::cerr << parentShape << "\n"; + } + return growthShape; +} + +} // namespace + +CgalPolygon2d createRegularPolygon(const CgalPoint2d& center, double radius, int numberOfVertices) { + assert(numberOfVertices > 2); + CgalPolygon2d polygon; + polygon.container().reserve(numberOfVertices); + double angle = (2. * M_PI) / numberOfVertices; + for (int i = 0; i < numberOfVertices; ++i) { + double phi = i * angle; + double px = radius * std::cos(phi) + center.x(); + double py = radius * std::sin(phi) + center.y(); + // Counter clockwise + polygon.push_back({px, py}); + } + return polygon; +} + +CgalPolygon2d growConvexPolygonInsideShape(const CgalPolygon2d& parentShape, CgalPoint2d center, int numberOfVertices, + double growthFactor) { + return growConvexPolygonInsideShape_impl(parentShape, center, numberOfVertices, growthFactor); +} + +CgalPolygon2d growConvexPolygonInsideShape(const CgalPolygonWithHoles2d& parentShape, CgalPoint2d center, int numberOfVertices, + double growthFactor) { + return growConvexPolygonInsideShape_impl(parentShape, center, numberOfVertices, growthFactor); +} + +void updateMean(CgalPoint2d& mean, const CgalPoint2d& oldValue, const CgalPoint2d& updatedValue, int N) { + // old_mean = 1/N * ( others + old_value); -> others = N*old_mean - old_value + // new_mean = 1/N * ( others + new_value); -> new_mean = old_mean - 1/N * oldValue + 1/N * updatedValue + mean += 1.0 / N * (updatedValue - oldValue); +} + +} // namespace convex_plane_decomposition diff --git a/plane_segmentation/convex_plane_decomposition/src/Draw.cpp b/plane_segmentation/convex_plane_decomposition/src/Draw.cpp new file mode 100644 index 00000000..3c20b8c4 --- /dev/null +++ b/plane_segmentation/convex_plane_decomposition/src/Draw.cpp @@ -0,0 +1,60 @@ +// +// Created by rgrandia on 09.06.20. +// + +#include "convex_plane_decomposition/Draw.h" + +namespace convex_plane_decomposition { + +cv::Vec3b randomColor() { + return cv::Vec3b((rand() & 255), (rand() & 255), (rand() & 255)); +} + +std::vector toCv(const CgalPolygon2d& polygon) { + std::vector contour; + contour.reserve(polygon.size()); + for (const auto& point : polygon) { + contour.emplace_back(point.x(), point.y()); + } + return contour; +} + +void drawContour(cv::Mat& img, const CgalPoint2d& point, double radius, const cv::Vec3b* color) { + const cv::Vec3b contourColor = (color == nullptr) ? 
randomColor() : *color;
+  cv::Point cvPoint(point.x(), point.y());
+  cv::circle(img, cvPoint, radius, contourColor);
+}
+
+void drawContour(cv::Mat& img, const CgalPolygon2d& polygon, const cv::Vec3b* color) {
+  const cv::Vec3b contourColor = (color == nullptr) ? randomColor() : *color;
+  std::vector<std::vector<cv::Point>> contours{toCv(polygon)};
+  drawContours(img, contours, 0, contourColor);
+}
+
+void drawContour(cv::Mat& img, const CgalPolygonWithHoles2d& polygonWithHoles2d, const cv::Vec3b* color) {
+  const cv::Vec3b contourColor = (color == nullptr) ? randomColor() : *color;
+
+  drawContour(img, polygonWithHoles2d.outer_boundary(), &contourColor);
+  for (const auto& hole : polygonWithHoles2d.holes()) {
+    drawContour(img, hole, &contourColor);
+  }
+}
+
+CgalPolygon2d scaleShape(const CgalPolygon2d& polygon, double scale) {
+  CgalPolygon2d scaledShape;
+  scaledShape.container().reserve(polygon.size());
+  for (const auto& point : polygon) {
+    scaledShape.push_back({scale * point.x(), scale * point.y()});
+  }
+  return scaledShape;
+}
+
+CgalPolygonWithHoles2d scaleShape(const CgalPolygonWithHoles2d& polygonWithHoles, double scale) {
+  CgalPolygonWithHoles2d scaledShape(scaleShape(polygonWithHoles.outer_boundary(), scale));
+
+  for (const auto& hole : polygonWithHoles.holes()) {
+    scaledShape.add_hole(scaleShape(hole, scale));
+  }
+  return scaledShape;
+}
+
+}
\ No newline at end of file
diff --git a/plane_segmentation/convex_plane_decomposition/src/GridMapPreprocessing.cpp b/plane_segmentation/convex_plane_decomposition/src/GridMapPreprocessing.cpp
new file mode 100644
index 00000000..265918e6
--- /dev/null
+++ b/plane_segmentation/convex_plane_decomposition/src/GridMapPreprocessing.cpp
@@ -0,0 +1,52 @@
+#include "convex_plane_decomposition/GridMapPreprocessing.h"
+
+#include <algorithm>
+#include <cmath>
+#include <utility>
+
+#include <grid_map_filters_rsl/inpainting.hpp>
+#include <grid_map_filters_rsl/smoothing.hpp>
+
+namespace convex_plane_decomposition {
+
+GridMapPreprocessing::GridMapPreprocessing(const PreprocessingParameters& parameters) : parameters_(parameters) {}
+
+void GridMapPreprocessing::preprocess(grid_map::GridMap& gridMap, const std::string& layer) const {
+  inpaint(gridMap, layer);
+  denoise(gridMap, layer);
+  changeResolution(gridMap, layer);
+}
+
+void GridMapPreprocessing::denoise(grid_map::GridMap& gridMap, const std::string& layer) const {
+  int kernelSize = std::max(1, std::min(parameters_.kernelSize, 5));  // must be 1, 3 or 5 for the current image type, see doc of cv::medianBlur
+  grid_map::smoothing::median(gridMap, layer, layer, kernelSize, 0, parameters_.numberOfRepeats);
+}
+
+void GridMapPreprocessing::changeResolution(grid_map::GridMap& gridMap, const std::string& layer) const {
+  bool hasSameResolution = std::abs(gridMap.getResolution() - parameters_.resolution) < 1e-6;
+
+  if (parameters_.resolution > 0.0 && !hasSameResolution) {
+    grid_map::inpainting::resample(gridMap, layer, parameters_.resolution);
+  }
+}
+
+void GridMapPreprocessing::inpaint(grid_map::GridMap& gridMap, const std::string& layer) const {
+  const std::string& layerOut = "tmp";
+  grid_map::inpainting::minValues(gridMap, layer, layerOut);
+
+  gridMap.get(layer) = std::move(gridMap.get(layerOut));
+  gridMap.erase(layerOut);
+}
+
+bool containsFiniteValue(const grid_map::Matrix& map) {
+  for (int col = 0; col < map.cols(); ++col) {
+    for (int row = 0; row < map.rows(); ++row) {
+      if (std::isfinite(map(row, col))) {
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+}  // namespace convex_plane_decomposition
diff --git a/plane_segmentation/convex_plane_decomposition/src/LoadGridmapFromImage.cpp
b/plane_segmentation/convex_plane_decomposition/src/LoadGridmapFromImage.cpp new file mode 100644 index 00000000..2f1fc783 --- /dev/null +++ b/plane_segmentation/convex_plane_decomposition/src/LoadGridmapFromImage.cpp @@ -0,0 +1,36 @@ +// +// Created by rgrandia on 16.03.22. +// + +#include "convex_plane_decomposition/LoadGridmapFromImage.h" + +#include +#include + +#include + +namespace convex_plane_decomposition { + +grid_map::GridMap loadGridmapFromImage(const std::string& filePath, const std::string& elevationLayer, const std::string& frameId, + double resolution, double scale) { + // Read the file + cv::Mat image; + image = cv::imread(filePath, cv::ImreadModes::IMREAD_GRAYSCALE); + + // Check for invalid input + if (!image.data) { + throw std::runtime_error("Could not open or find the image"); + } + + // Min max values + double minValue, maxValue; + cv::minMaxLoc(image, &minValue, &maxValue); + + grid_map::GridMap mapOut({elevationLayer}); + mapOut.setFrameId(frameId); + grid_map::GridMapCvConverter::initializeFromImage(image, resolution, mapOut, grid_map::Position(0.0, 0.0)); + grid_map::GridMapCvConverter::addLayerFromImage(image, elevationLayer, mapOut, float(0.0), float(scale), 0.5); + return mapOut; +} + +} // namespace convex_plane_decomposition \ No newline at end of file diff --git a/plane_segmentation/convex_plane_decomposition/src/PlanarRegion.cpp b/plane_segmentation/convex_plane_decomposition/src/PlanarRegion.cpp new file mode 100644 index 00000000..f0aa09f6 --- /dev/null +++ b/plane_segmentation/convex_plane_decomposition/src/PlanarRegion.cpp @@ -0,0 +1,59 @@ +#include "convex_plane_decomposition/PlanarRegion.h" + +namespace convex_plane_decomposition { + +Eigen::Isometry3d getTransformLocalToGlobal(const NormalAndPosition& normalAndPosition) { + const Eigen::Vector3d zAxisInGlobal = normalAndPosition.normal.normalized(); + const auto& positionInGlobal = normalAndPosition.position; + + // Cross with any vector that is not equal to surfaceNormal + Eigen::Vector3d yAxisInGlobal = zAxisInGlobal.cross(Eigen::Vector3d::UnitX()); + { // Normalize the yAxis. Need to pick a different direction if z happened to intersect with unitX + const auto ySquaredNorm = yAxisInGlobal.squaredNorm(); + const double crossTolerance = 1e-3; + if (ySquaredNorm > crossTolerance) { + yAxisInGlobal /= std::sqrt(ySquaredNorm); + } else { + // normal was almost equal to unitX. 
Pick the y-axis in a different way (approximately equal to unitY): + yAxisInGlobal = zAxisInGlobal.cross(Eigen::Vector3d::UnitY().cross(zAxisInGlobal)).normalized(); + } + } + + Eigen::Isometry3d transform; + transform.linear().col(0) = yAxisInGlobal.cross(zAxisInGlobal); + transform.linear().col(1) = yAxisInGlobal; + transform.linear().col(2) = zAxisInGlobal; + transform.translation() = positionInGlobal; + + return transform; +} + +CgalPoint2d projectToPlaneAlongGravity(const CgalPoint2d& worldFrameXY, const Eigen::Isometry3d& transformPlaneToWorld) { + // Shorthands + const auto& xAxis = transformPlaneToWorld.linear().col(0); + const auto& yAxis = transformPlaneToWorld.linear().col(1); + const auto& surfaceNormalInWorld = transformPlaneToWorld.linear().col(2); + const auto& planeOriginInWorld = transformPlaneToWorld.translation(); + + // Horizontal difference + const double dx = worldFrameXY.x() - planeOriginInWorld.x(); + const double dy = worldFrameXY.y() - planeOriginInWorld.y(); + + // Vertical difference + // solve surfaceNormalInWorld.dot(projectedPosition - planeOriginInWorld) = 0 + // with projectPosition XY = worldFrameXY; + Eigen::Vector3d planeOriginToProjectedPointInWorld( + dx, dy, (-dx * surfaceNormalInWorld.x() - dy * surfaceNormalInWorld.y()) / surfaceNormalInWorld.z()); + + // Project XY coordinates to the plane frame + return {xAxis.dot(planeOriginToProjectedPointInWorld), yAxis.dot(planeOriginToProjectedPointInWorld)}; +} + +Eigen::Vector3d positionInWorldFrameFromPosition2dInPlane(const CgalPoint2d& planeXY, const Eigen::Isometry3d& transformPlaneToWorld) { + // Compute transform given that z in plane = 0.0 + Eigen::Vector3d pointInWorld = transformPlaneToWorld.translation() + planeXY.x() * transformPlaneToWorld.linear().col(0) + + planeXY.y() * transformPlaneToWorld.linear().col(1); + return pointInWorld; +} + +} // namespace convex_plane_decomposition \ No newline at end of file diff --git a/plane_segmentation/convex_plane_decomposition/src/PlaneDecompositionPipeline.cpp b/plane_segmentation/convex_plane_decomposition/src/PlaneDecompositionPipeline.cpp new file mode 100644 index 00000000..573fc580 --- /dev/null +++ b/plane_segmentation/convex_plane_decomposition/src/PlaneDecompositionPipeline.cpp @@ -0,0 +1,45 @@ +#include "convex_plane_decomposition/PlaneDecompositionPipeline.h" + +#include + +namespace convex_plane_decomposition { + +PlaneDecompositionPipeline::PlaneDecompositionPipeline(const Config& config) + : preprocessing_(config.preprocessingParameters), + slidingWindowPlaneExtractor_(config.slidingWindowPlaneExtractorParameters, config.ransacPlaneExtractorParameters), + contourExtraction_(config.contourExtractionParameters), + postprocessing_(config.postprocessingParameters) {} + +void PlaneDecompositionPipeline::update(grid_map::GridMap&& gridMap, const std::string& elevationLayer) { + // Clear / Overwrite old result + planarTerrain_.planarRegions.clear(); + planarTerrain_.gridMap = std::move(gridMap); + + preprocessTimer_.startTimer(); + preprocessing_.preprocess(planarTerrain_.gridMap, elevationLayer); + preprocessTimer_.endTimer(); + + slidingWindowTimer_.startTimer(); + slidingWindowPlaneExtractor_.runExtraction(planarTerrain_.gridMap, elevationLayer); + slidingWindowTimer_.endTimer(); + + contourExtractionTimer_.startTimer(); + planarTerrain_.planarRegions = contourExtraction_.extractPlanarRegions(slidingWindowPlaneExtractor_.getSegmentedPlanesMap()); + contourExtractionTimer_.endTimer(); + + postprocessTimer_.startTimer(); + // Add binary map + const 
std::string planeClassificationLayer{"plane_classification"};
+  planarTerrain_.gridMap.add(planeClassificationLayer);
+  auto& traversabilityMask = planarTerrain_.gridMap.get(planeClassificationLayer);
+  cv::cv2eigen(slidingWindowPlaneExtractor_.getBinaryLabeledImage(), traversabilityMask);
+
+  postprocessing_.postprocess(planarTerrain_, elevationLayer, planeClassificationLayer);
+  postprocessTimer_.endTimer();
+}
+
+void PlaneDecompositionPipeline::getSegmentation(grid_map::GridMap::Matrix& segmentation) const {
+  cv::cv2eigen(slidingWindowPlaneExtractor_.getSegmentedPlanesMap().labeledImage, segmentation);
+}
+
+}  // namespace convex_plane_decomposition
\ No newline at end of file
diff --git a/plane_segmentation/convex_plane_decomposition/src/Postprocessing.cpp b/plane_segmentation/convex_plane_decomposition/src/Postprocessing.cpp
new file mode 100644
index 00000000..4b07aadf
--- /dev/null
+++ b/plane_segmentation/convex_plane_decomposition/src/Postprocessing.cpp
@@ -0,0 +1,146 @@
+#include "convex_plane_decomposition/Postprocessing.h"
+
+#include <opencv2/core.hpp>
+#include <opencv2/core/eigen.hpp>
+#include <opencv2/imgproc.hpp>
+
+#include <grid_map_filters_rsl/inpainting.hpp>
+#include <grid_map_filters_rsl/processing.hpp>
+
+namespace convex_plane_decomposition {
+
+Postprocessing::Postprocessing(const PostprocessingParameters& parameters) : parameters_(parameters) {}
+
+void Postprocessing::postprocess(PlanarTerrain& planarTerrain, const std::string& elevationLayer,
+                                 const std::string& planeSegmentationLayer) const {
+  auto& elevationData = planarTerrain.gridMap.get(elevationLayer);
+  const auto& planarityMask = planarTerrain.gridMap.get(planeSegmentationLayer);
+
+  // Store the unaltered map
+  planarTerrain.gridMap.add(elevationLayer + "_before_postprocess", elevationData);
+
+  // Post-process the planar regions
+  addHeightOffset(planarTerrain.planarRegions);
+
+  // Add a smooth layer for the base reference
+  addSmoothLayer(planarTerrain.gridMap, elevationData, planarityMask);
+
+  // Post-process the elevation map
+  dilationInNonplanarRegions(elevationData, planarityMask);
+  addHeightOffset(elevationData, planarityMask);
+}
+
+void Postprocessing::dilationInNonplanarRegions(Eigen::MatrixXf& elevationData, const Eigen::MatrixXf& planarityMask) const {
+  if (parameters_.nonplanar_horizontal_offset > 0) {
+    // Convert to an OpenCV image
+    cv::Mat elevationImage;
+    cv::eigen2cv(elevationData, elevationImage);  // creates a CV_32F image
+
+    // Dilate
+    const int dilationSize = 2 * parameters_.nonplanar_horizontal_offset + 1;
+    const int dilationType = cv::MORPH_ELLIPSE;  // ellipse inscribed in the square of size dilationSize
+    const auto dilationKernel_ = cv::getStructuringElement(dilationType, cv::Size(dilationSize, dilationSize));
+    cv::dilate(elevationImage, elevationImage, dilationKernel_, cv::Point(-1, -1), 1, cv::BORDER_REPLICATE);
+
+    // Convert back
+    Eigen::MatrixXf elevationDilated;
+    cv::cv2eigen(elevationImage, elevationDilated);
+
+    // Merge: original elevation for planar regions (mask = 1.0), dilated elevation for non-planar regions (mask = 0.0)
+    elevationData = planarityMask.array() * elevationData.array() + (1.0 - planarityMask.array()) * elevationDilated.array();
+  }
+}
+
+void Postprocessing::addHeightOffset(Eigen::MatrixXf& elevationData, const Eigen::MatrixXf& planarityMask) const {
+  // Lift the elevation layer. For the untraversable offset we first add the offset everywhere and subtract it again in traversable regions.
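+  // With plane offset a and non-planar offset b this yields: planar cells (mask = 1) end up at z + a, non-planar cells (mask = 0) at z + a + b.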
+ if (parameters_.extracted_planes_height_offset != 0.0 || parameters_.nonplanar_height_offset != 0.0) { + elevationData.array() += (parameters_.extracted_planes_height_offset + parameters_.nonplanar_height_offset); + + if (parameters_.nonplanar_height_offset != 0.0) { + elevationData.noalias() -= parameters_.nonplanar_height_offset * planarityMask; + } + } +} + +void Postprocessing::addHeightOffset(std::vector& planarRegions) const { + if (parameters_.extracted_planes_height_offset != 0.0) { + for (auto& planarRegion : planarRegions) { + planarRegion.transformPlaneToWorld.translation().z() += parameters_.extracted_planes_height_offset; + } + } +} + +void Postprocessing::addSmoothLayer(grid_map::GridMap& gridMap, const Eigen::MatrixXf& elevationData, + const Eigen::MatrixXf& planarityMask) const { + auto kernelSizeInPixels = [res = gridMap.getResolution()](double realSize) { + return 2 * static_cast(std::round(realSize / res)) + 1; + }; + + // Helper to convert to Eigen + auto toEigen = [](const cv::Mat& image) { + Eigen::MatrixXf data; + cv::cv2eigen(image, data); + return data; + }; + + // Helper to openCV + auto toCv = [](const Eigen::MatrixXf& data) { + cv::Mat image; + cv::eigen2cv(data, image); + return image; + }; + + const int dilationSize = kernelSizeInPixels(parameters_.smoothing_dilation_size); + const int kernel = kernelSizeInPixels(parameters_.smoothing_box_kernel_size); + const int kernelGauss = kernelSizeInPixels(parameters_.smoothing_gauss_kernel_size); + + + // Set non planar regions to NaN + Eigen::MatrixXf elevationWithNaN = + (planarityMask.array() == 1.0) + .select(elevationData, Eigen::MatrixXf::Constant(elevationData.rows(), elevationData.cols(), NAN)); + gridMap.add("elevationWithNaN", elevationWithNaN); + + // Inpainting + grid_map::inpainting::minValues(gridMap, "elevationWithNaN", "elevationWithNaN_i"); + + // Closing + auto elevationWithNaNCv = toCv(gridMap.get("elevationWithNaN_i")); + const int dilationType = cv::MORPH_ELLIPSE; // ellipse inscribed in the square of size dilationSize + const auto dilationKernel_ = cv::getStructuringElement(dilationType, cv::Size(dilationSize, dilationSize)); + cv::morphologyEx(elevationWithNaNCv, elevationWithNaNCv, cv::MORPH_CLOSE, dilationKernel_, cv::Point(-1, -1), 1, cv::BORDER_REPLICATE); + + gridMap.add("elevationWithNaNClosed", toEigen(elevationWithNaNCv)); + + // Dilation with a slope of 45 deg + int halfDilationSize = (dilationSize - 1) / 2; + float slope = gridMap.getResolution(); // dh dpixel + Eigen::MatrixXf offsets(dilationSize, dilationSize); + for (int i = 0; i < dilationSize; ++i) { + for (int j = 0; j < dilationSize; ++j) { + const auto dx = (i - halfDilationSize); + const auto dy = (j - halfDilationSize); + offsets(i, j) = slope * std::sqrt(dx * dx + dy * dy); + } + } + grid_map::processing::applyKernelFunction( + gridMap, "elevationWithNaNClosed", "elevationWithNaNClosedDilated", dilationSize, + [&](const Eigen::Ref& data) -> float { return (data - offsets).maxCoeffOfFinites(); }); + + // Convert to openCV + auto elevationWithNaNImage = toCv(gridMap.get("elevationWithNaNClosedDilated")); + + // Filter definition + auto smoothingFilter = [kernel, kernelGauss](const cv::Mat& imageIn) { + cv::Mat imageOut; + cv::boxFilter(imageIn, imageOut, -1, {kernel, kernel}, cv::Point(-1, -1), true, cv::BorderTypes::BORDER_REPLICATE); + cv::GaussianBlur(imageOut, imageOut, {kernelGauss, kernelGauss}, 0, 0, cv::BorderTypes::BORDER_REPLICATE); + return imageOut; + }; + auto smooth_planar = 
smoothingFilter(elevationWithNaNImage); + + // Add layer to map. + gridMap.add("smooth_planar", toEigen(smooth_planar)); +} + +} // namespace convex_plane_decomposition diff --git a/plane_segmentation/convex_plane_decomposition/src/SegmentedPlaneProjection.cpp b/plane_segmentation/convex_plane_decomposition/src/SegmentedPlaneProjection.cpp new file mode 100644 index 00000000..ca4a4988 --- /dev/null +++ b/plane_segmentation/convex_plane_decomposition/src/SegmentedPlaneProjection.cpp @@ -0,0 +1,150 @@ +// +// Created by rgrandia on 12.10.21. +// + +#include "convex_plane_decomposition/SegmentedPlaneProjection.h" + +#include "convex_plane_decomposition/GeometryUtils.h" + +namespace convex_plane_decomposition { + +namespace { // Helper functions that only make sense in this context + +double distanceCost(const Eigen::Vector3d& query, const Eigen::Vector3d& terrainPoint) { + const double dx = query.x() - terrainPoint.x(); + const double dy = query.y() - terrainPoint.y(); + const double dz = query.z() - terrainPoint.z(); + return dx * dx + dy * dy + dz * dz; +} + +double distanceCostLowerbound(double distanceSquared) { + return distanceSquared; +} + +double intervalSquareDistance(double value, double min, double max) { + // - | 0 | + + // min max + // Returns 0.0 if between min and max. Returns the distance to one boundary otherwise. + if (value < min) { + double diff = min - value; + return diff * diff; + } else if (value < max) { + return 0.0; + } else { + double diff = max - value; + return diff * diff; + } +} + +double squaredDistanceToBoundingBox(const CgalPoint2d& point, const CgalBbox2d& boundingBox) { + const double dxdx = intervalSquareDistance(point.x(), boundingBox.xmin(), boundingBox.xmax()); + const double dydy = intervalSquareDistance(point.y(), boundingBox.ymin(), boundingBox.ymax()); + return dxdx + dydy; +} + +const CgalPolygonWithHoles2d* findInsetContainingThePoint(const CgalPoint2d& point, const std::vector& insets) { + for (const auto& inset : insets) { + if (isInside(point, inset.outer_boundary())) { + return &inset; + } + } + return nullptr; +} + +} // namespace + +CgalPoint2d projectToPlanarRegion(const CgalPoint2d& queryProjectedToPlane, const PlanarRegion& planarRegion) { + // First search if the projected point is inside any of the insets. + // Note: we know that all insets are non-overlapping, and are not nested (no shape is contained in the hole of another shape) + const auto* const insetPtrContainingPoint = findInsetContainingThePoint(queryProjectedToPlane, planarRegion.boundaryWithInset.insets); + + // Compute the projection + CgalPoint2d projectedPoint; + if (insetPtrContainingPoint == nullptr) { + // Not inside any of the insets. Need to look for the closest one. The projection will be to the boundary + double minDistSquared = std::numeric_limits::max(); + for (const auto& inset : planarRegion.boundaryWithInset.insets) { + const auto closestPoint = projectToClosestPoint(queryProjectedToPlane, inset.outer_boundary()); + double distSquare = squaredDistance(closestPoint, queryProjectedToPlane); + if (distSquare < minDistSquared) { + projectedPoint = closestPoint; + minDistSquared = distSquare; + } + } + } else { + // Query point is inside and does not need projection... + projectedPoint = queryProjectedToPlane; + + // ... 
unless it is inside a hole + for (const auto& hole : insetPtrContainingPoint->holes()) { + if (isInside(queryProjectedToPlane, hole)) { + projectedPoint = projectToClosestPoint(queryProjectedToPlane, hole); + break; // No need to search other holes. Holes are not overlapping + } + } + } + + return projectedPoint; +} + +std::vector sortWithBoundingBoxes(const Eigen::Vector3d& positionInWorld, + const std::vector& planarRegions) { + // Compute distance to bounding boxes + std::vector regionsAndBboxSquareDistances; + regionsAndBboxSquareDistances.reserve(planarRegions.size()); + for (const auto& planarRegion : planarRegions) { + const Eigen::Vector3d positionInTerrainFrame = planarRegion.transformPlaneToWorld.inverse() * positionInWorld; + const double dzdz = positionInTerrainFrame.z() * positionInTerrainFrame.z(); + + RegionSortingInfo regionSortingInfo; + regionSortingInfo.regionPtr = &planarRegion; + regionSortingInfo.positionInTerrainFrame = {positionInTerrainFrame.x(), positionInTerrainFrame.y()}; + regionSortingInfo.boundingBoxSquareDistance = + squaredDistanceToBoundingBox(regionSortingInfo.positionInTerrainFrame, planarRegion.bbox2d) + dzdz; + + regionsAndBboxSquareDistances.push_back(regionSortingInfo); + } + + // Sort regions close to far + std::sort(regionsAndBboxSquareDistances.begin(), regionsAndBboxSquareDistances.end(), + [](const RegionSortingInfo& lhs, const RegionSortingInfo& rhs) { + return lhs.boundingBoxSquareDistance < rhs.boundingBoxSquareDistance; + }); + + return regionsAndBboxSquareDistances; +} + +PlanarTerrainProjection getBestPlanarRegionAtPositionInWorld(const Eigen::Vector3d& positionInWorld, + const std::vector& planarRegions, + const std::function& penaltyFunction) { + const auto sortedRegions = sortWithBoundingBoxes(positionInWorld, planarRegions); + + // Look for closest planar region. + PlanarTerrainProjection projection; + projection.cost = std::numeric_limits::max(); + for (const auto& regionInfo : sortedRegions) { + // Skip based on lower bound + if (distanceCostLowerbound(regionInfo.boundingBoxSquareDistance) > projection.cost) { + continue; + } + + // Project onto planar region + const auto projectedPointInTerrainFrame = projectToPlanarRegion(regionInfo.positionInTerrainFrame, *regionInfo.regionPtr); + + // Express projected point in World frame + const auto projectionInWorldFrame = + positionInWorldFrameFromPosition2dInPlane(projectedPointInTerrainFrame, regionInfo.regionPtr->transformPlaneToWorld); + + const auto cost = distanceCost(positionInWorld, projectionInWorldFrame) + penaltyFunction(projectionInWorldFrame); + if (cost < projection.cost) { + projection.cost = cost; + projection.regionPtr = regionInfo.regionPtr; + projection.positionInTerrainFrame = projectedPointInTerrainFrame; + projection.positionInWorld = projectionInWorldFrame; + } + } + + return projection; +} + +} // namespace convex_plane_decomposition \ No newline at end of file diff --git a/plane_segmentation/convex_plane_decomposition/src/contour_extraction/ContourExtraction.cpp b/plane_segmentation/convex_plane_decomposition/src/contour_extraction/ContourExtraction.cpp new file mode 100644 index 00000000..ee5a3fc9 --- /dev/null +++ b/plane_segmentation/convex_plane_decomposition/src/contour_extraction/ContourExtraction.cpp @@ -0,0 +1,145 @@ +// +// Created by rgrandia on 04.06.20. 
+// + +#include "convex_plane_decomposition/contour_extraction/ContourExtraction.h" +#include "convex_plane_decomposition/contour_extraction/Upsampling.h" + +#include +#include + +namespace convex_plane_decomposition { +namespace contour_extraction { + +ContourExtraction::ContourExtraction(const ContourExtractionParameters& parameters) + : parameters_(parameters), binaryImage_(cv::Size(0, 0), CV_8UC1) { + { + int erosionSize = 1; // single sided length of the kernel + int erosionType = cv::MORPH_CROSS; + insetKernel_ = cv::getStructuringElement(erosionType, cv::Size(2 * erosionSize + 1, 2 * erosionSize + 1)); + } + { + int erosionSize = 1 + parameters_.marginSize; // single sided length of the kernel + int erosionType = cv::MORPH_ELLIPSE; + marginKernel_ = cv::getStructuringElement(erosionType, cv::Size(2 * erosionSize + 1, 2 * erosionSize + 1)); + } +} + +std::vector ContourExtraction::extractPlanarRegions(const SegmentedPlanesMap& segmentedPlanesMap) { + const auto upSampledMap = upSample(segmentedPlanesMap); + + std::vector planarRegions; + planarRegions.reserve(upSampledMap.highestLabel + 1); // Can be more or less in the end if regions are split or removed. + for (const auto& label_plane : upSampledMap.labelPlaneParameters) { + const int label = label_plane.first; + binaryImage_ = upSampledMap.labeledImage == label; + + // Try with safety margin + cv::erode(binaryImage_, binaryImage_, marginKernel_, cv::Point(-1,-1), 1, cv::BORDER_REPLICATE); + auto boundariesAndInsets = contour_extraction::extractBoundaryAndInset(binaryImage_, insetKernel_); + + // If safety margin makes the region disappear -> try without + if (boundariesAndInsets.empty()) { + binaryImage_ = upSampledMap.labeledImage == label; + // still 1 pixel erosion to remove the growth after upsampling + cv::erode(binaryImage_, binaryImage_, insetKernel_, cv::Point(-1,-1), 1, cv::BORDER_REPLICATE); + boundariesAndInsets = contour_extraction::extractBoundaryAndInset(binaryImage_, insetKernel_); + } + + const auto plane_parameters = getTransformLocalToGlobal(label_plane.second); + for (auto& boundaryAndInset : boundariesAndInsets) { + // Transform points from pixel space to local terrain frame + transformInPlace(boundaryAndInset, [&](CgalPoint2d& point) { + auto pointInWorld = pixelToWorldFrame(point, upSampledMap.resolution, upSampledMap.mapOrigin); + point = projectToPlaneAlongGravity(pointInWorld, plane_parameters); + }); + + PlanarRegion planarRegion; + planarRegion.boundaryWithInset = std::move(boundaryAndInset); + planarRegion.transformPlaneToWorld = plane_parameters; + planarRegion.bbox2d = planarRegion.boundaryWithInset.boundary.outer_boundary().bbox(); + planarRegions.push_back(std::move(planarRegion)); + } + } + return planarRegions; +} + +std::vector extractBoundaryAndInset(cv::Mat& binary_image, const cv::Mat& erosionKernel) { + // Get boundary + std::vector boundaries = extractPolygonsFromBinaryImage(binary_image); + + // Erode + cv::erode(binary_image, binary_image, erosionKernel, cv::Point(-1,-1), 1, cv::BORDER_REPLICATE); + + // Erosion of the edge of the map + binary_image.row(0) = 0; + binary_image.row(binary_image.rows - 1) = 0; + binary_image.col(0) = 0; + binary_image.col(binary_image.cols - 1) = 0; + + // Get insets + std::vector insets = extractPolygonsFromBinaryImage(binary_image); + + // Associate boundaries with insets + std::vector boundariesWithInsets; + for (const auto& boundary : boundaries) { + std::vector assignedInsets; + for (const auto& inset : insets) { + if 
+      if (isInside(inset.outer_boundary().vertex(0), boundary)) {
+        assignedInsets.push_back(inset);
+      }
+    }
+
+    if (!assignedInsets.empty()) {
+      BoundaryWithInset boundaryWithInset;
+      boundaryWithInset.boundary = boundary;
+      boundaryWithInset.insets = assignedInsets;
+      boundariesWithInsets.push_back(std::move(boundaryWithInset));
+    }
+  }
+  return boundariesWithInsets;
+}
+
+std::vector<CgalPolygonWithHoles2d> extractPolygonsFromBinaryImage(const cv::Mat& binary_image) {
+  std::vector<std::vector<cv::Point>> contours;
+  std::vector<cv::Vec4i> hierarchy;  // [Next, Previous, First_Child, Parent]
+  auto isOuterContour = [](const cv::Vec4i& hierarchyVector) {
+    return hierarchyVector[3] < 0;  // no parent
+  };
+
+  cv::findContours(binary_image, contours, hierarchy, cv::RETR_CCOMP, cv::CHAIN_APPROX_SIMPLE);
+
+  std::vector<CgalPolygonWithHoles2d> plane_polygons;
+  for (int i = 0; i < contours.size(); i++) {
+    if (isOuterContour(hierarchy[i]) && contours[i].size() > 1) {
+      CgalPolygonWithHoles2d polygon;
+      polygon.outer_boundary() = cgalPolygonFromOpenCv(contours[i]);
+
+      // Add children as holes
+      int childIndex = hierarchy[i][2];  // First child
+      while (childIndex > 0) {
+        polygon.add_hole(cgalPolygonFromOpenCv(contours[childIndex]));
+        childIndex = hierarchy[childIndex][0];  // Next child
+      }
+      plane_polygons.push_back(std::move(polygon));
+    }
+  }
+  return plane_polygons;
+}
+
+CgalPolygon2d cgalPolygonFromOpenCv(const std::vector<cv::Point>& openCvPolygon) {
+  CgalPolygon2d polygon;
+  polygon.container().reserve(openCvPolygon.size());
+  for (const auto& point : openCvPolygon) {
+    polygon.container().emplace_back(point.x, point.y);
+  }
+  return polygon;
+}
+
+CgalPoint2d pixelToWorldFrame(const CgalPoint2d& pixelspaceCgalPoint2d, double resolution, const Eigen::Vector2d& mapOffset) {
+  // Notice the transpose of x and y!
+  return {mapOffset.x() - resolution * pixelspaceCgalPoint2d.y(), mapOffset.y() - resolution * pixelspaceCgalPoint2d.x()};
+}
+
+}  // namespace contour_extraction
+}  // namespace convex_plane_decomposition
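As a quick numeric check of pixelToWorldFrame above, a self-contained sketch with an assumed resolution and origin (an OpenCV point stores x = column and y = row, hence the swap):

    #include <cassert>
    #include <cmath>

    int main() {
      const double resolution = 0.04;             // assumed map resolution [m]
      const double originX = 1.0, originY = 2.0;  // assumed map origin [m]
      const double pixelX = 5.0, pixelY = 2.0;    // OpenCV point: x = column 5, y = row 2
      // Same formula as pixelToWorldFrame: image rows run along world x, columns along world y.
      const double worldX = originX - resolution * pixelY;  // 1.0 - 0.04 * 2 = 0.92
      const double worldY = originY - resolution * pixelX;  // 2.0 - 0.04 * 5 = 1.80
      assert(std::abs(worldX - 0.92) < 1e-12);
      assert(std::abs(worldY - 1.80) < 1e-12);
      return 0;
    }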
diff --git a/plane_segmentation/convex_plane_decomposition/src/contour_extraction/Upsampling.cpp b/plane_segmentation/convex_plane_decomposition/src/contour_extraction/Upsampling.cpp
new file mode 100644
index 00000000..a742d069
--- /dev/null
+++ b/plane_segmentation/convex_plane_decomposition/src/contour_extraction/Upsampling.cpp
@@ -0,0 +1,71 @@
+//
+// Created by rgrandia on 26.10.21.
+//
+
+#include "convex_plane_decomposition/contour_extraction/Upsampling.h"
+
+namespace convex_plane_decomposition {
+namespace contour_extraction {
+
+namespace {
+// Helper function that upsamples a single column. Writes into a target that is already allocated with the right size.
+// The labeled image is CV_32S, so cells are accessed as int.
+void upSampleColumn(const cv::Mat& column, cv::Mat& target, int col, int rows, int upsampledRows) {
+  for (int row = 0; row < rows; ++row) {
+    const auto value = column.at<int>(row);
+    if (row == 0) {
+      target.at<int>(0, col) = value;
+      target.at<int>(1, col) = value;
+    } else if (row + 1 == rows) {
+      target.at<int>(upsampledRows - 2, col) = value;
+      target.at<int>(upsampledRows - 1, col) = value;
+    } else {
+      const int targetRow = 2 + 3 * (row - 1);
+      target.at<int>(targetRow, col) = value;
+      target.at<int>(targetRow + 1, col) = value;
+      target.at<int>(targetRow + 2, col) = value;
+    }
+  }
+}
+}  // namespace
+
+cv::Mat upSample(const cv::Mat& image) {
+  const int rows = image.rows;
+  const int cols = image.cols;
+  assert(rows >= 2);
+  assert(cols >= 2);
+  const int upsampledRows = 4 + 3 * (rows - 2);
+  const int upsampledCols = 4 + 3 * (cols - 2);
+
+  cv::Mat result(upsampledRows, upsampledCols, image.type());
+
+  for (int col = 0; col < cols; ++col) {
+    const auto& column = image.col(col);
+    if (col == 0) {
+      upSampleColumn(column, result, 0, rows, upsampledRows);
+      result.col(0).copyTo(result.col(1));
+    } else if (col + 1 == cols) {
+      upSampleColumn(column, result, upsampledCols - 2, rows, upsampledRows);
+      result.col(upsampledCols - 2).copyTo(result.col(upsampledCols - 1));
+    } else {
+      const int targetCol = 2 + 3 * (col - 1);
+      upSampleColumn(column, result, targetCol, rows, upsampledRows);
+      result.col(targetCol).copyTo(result.col(targetCol + 1));
+      result.col(targetCol + 1).copyTo(result.col(targetCol + 2));
+    }
+  }
+
+  return result;
+}
+
+SegmentedPlanesMap upSample(const SegmentedPlanesMap& mapIn) {
+  SegmentedPlanesMap mapOut;
+  mapOut.labelPlaneParameters = mapIn.labelPlaneParameters;
+  mapOut.labeledImage = upSample(mapIn.labeledImage);
+  mapOut.resolution = mapIn.resolution / 3.0;
+  mapOut.mapOrigin = mapIn.mapOrigin;
+  mapOut.highestLabel = mapIn.highestLabel;
+  return mapOut;
+}
+
+}  // namespace contour_extraction
+}  // namespace convex_plane_decomposition
\ No newline at end of file
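The size bookkeeping of the upsampling (borders duplicated twice, interior cells three times, resolution divided by 3) can be checked in isolation; a minimal sketch, independent of OpenCV:

    #include <cassert>

    int main() {
      // Rule from upSample(): an n-cell row maps to 4 + 3 * (n - 2) = 3n - 2 cells,
      // while the resolution is divided by 3, so the covered extent is preserved
      // up to the two border cells that are only duplicated twice.
      for (int n = 2; n <= 1000; ++n) {
        const int upsampled = 4 + 3 * (n - 2);
        assert(upsampled == 3 * n - 2);
      }
      return 0;
    }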
diff --git a/plane_segmentation/convex_plane_decomposition/src/ransac/RansacPlaneExtractor.cpp b/plane_segmentation/convex_plane_decomposition/src/ransac/RansacPlaneExtractor.cpp
new file mode 100644
index 00000000..98dc720e
--- /dev/null
+++ b/plane_segmentation/convex_plane_decomposition/src/ransac/RansacPlaneExtractor.cpp
@@ -0,0 +1,39 @@
+#include "convex_plane_decomposition/ransac/RansacPlaneExtractor.hpp"
+
+namespace ransac_plane_extractor {
+
+RansacPlaneExtractor::RansacPlaneExtractor(const RansacPlaneExtractorParameters& parameters) {
+  setParameters(parameters);
+  ransac_.add_shape_factory<Plane>();
+}
+
+void RansacPlaneExtractor::setParameters(const RansacPlaneExtractorParameters& parameters) {
+  cgalRansacParameters_.probability = parameters.probability;
+  cgalRansacParameters_.min_points = parameters.min_points;
+  cgalRansacParameters_.epsilon = parameters.epsilon / 3.0;  // CGAL RANSAC puts the inlier tolerance at 3 times epsilon
+  cgalRansacParameters_.cluster_epsilon = parameters.cluster_epsilon;
+  cgalRansacParameters_.normal_threshold = std::cos(parameters.normal_threshold * M_PI / 180.0);
+}
+
+void RansacPlaneExtractor::detectPlanes(std::vector<PointWithNormal>& points_with_normal) {
+  ransac_.set_input(points_with_normal);
+  ransac_.detect(cgalRansacParameters_);
+}
+
+std::pair<Eigen::Vector3d, Eigen::Vector3d> RansacPlaneExtractor::getPlaneParameters(Shape* shapePtr) {
+  const auto* planePtr = static_cast<const Plane*>(shapePtr);
+
+  // Get the normal, pointing upwards
+  Eigen::Vector3d normalVector(planePtr->plane_normal().x(), planePtr->plane_normal().y(), planePtr->plane_normal().z());
+  if (normalVector.z() < 0.0) {
+    normalVector = -normalVector;
+  }
+
+  // Project the origin to get a point on the plane.
+  const auto support = planePtr->projection({0.0, 0.0, 0.0});
+  const Eigen::Vector3d supportVector(support.x(), support.y(), support.z());
+
+  return {normalVector, supportVector};
+}
+
+}  // namespace ransac_plane_extractor
diff --git a/plane_segmentation/convex_plane_decomposition/src/sliding_window_plane_extraction/SlidingWindowPlaneExtractor.cpp b/plane_segmentation/convex_plane_decomposition/src/sliding_window_plane_extraction/SlidingWindowPlaneExtractor.cpp
new file mode 100644
index 00000000..04c488d6
--- /dev/null
+++ b/plane_segmentation/convex_plane_decomposition/src/sliding_window_plane_extraction/SlidingWindowPlaneExtractor.cpp
@@ -0,0 +1,311 @@
+#include "convex_plane_decomposition/sliding_window_plane_extraction/SlidingWindowPlaneExtractor.h"
+
+#include
+#include
+#include
+
+#include
+
+#include
+#include
+
+#include
+
+namespace convex_plane_decomposition {
+namespace sliding_window_plane_extractor {
+
+namespace {
+
+std::pair<Eigen::Vector3d, double> normalAndErrorFromCovariance(int numPoint, const Eigen::Vector3d& mean,
+                                                                const Eigen::Matrix3d& sumSquared) {
+  const Eigen::Matrix3d covarianceMatrix = sumSquared / numPoint - mean * mean.transpose();
+
+  // Compute eigenvectors.
+  // Eigenvalues are ordered small to large.
+  // Worst-case bound for a zero eigenvalue from: https://eigen.tuxfamily.org/dox/classEigen_1_1SelfAdjointEigenSolver.html
+  Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> solver;
+  solver.computeDirect(covarianceMatrix, Eigen::DecompositionOptions::ComputeEigenvectors);
+  if (solver.eigenvalues()(1) > 1e-8) {
+    Eigen::Vector3d unitaryNormalVector = solver.eigenvectors().col(0);
+
+    // Check the direction of the normal vector and flip the sign upwards
+    if (unitaryNormalVector.z() < 0.0) {
+      unitaryNormalVector = -unitaryNormalVector;
+    }
+    // The first eigenvalue might become slightly negative due to numerics.
+    double squareError = (solver.eigenvalues()(0) > 0.0) ? solver.eigenvalues()(0) : 0.0;
+    return {unitaryNormalVector, squareError};
+  } else {  // If the second eigenvalue is zero, the normal is not defined.
+    return {Eigen::Vector3d::UnitZ(), 1e30};
+  }
+}
+
+}  // namespace
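The covariance construction above can be validated on synthetic data; a minimal sketch in plain Eigen, with illustrative sample points:

    #include <cassert>
    #include <cmath>

    #include <Eigen/Dense>

    int main() {
      // Four points exactly on the plane z = 0: the smallest eigenvalue of the covariance
      // is the mean squared fit error (0 here) and its eigenvector is the plane normal.
      const Eigen::Vector3d points[] = {{0, 0, 0}, {1, 0, 0}, {0, 1, 0}, {1, 1, 0}};
      Eigen::Vector3d sum = Eigen::Vector3d::Zero();
      Eigen::Matrix3d sumSquared = Eigen::Matrix3d::Zero();
      for (const auto& p : points) {
        sum += p;
        sumSquared += p * p.transpose();
      }
      const int n = 4;
      const Eigen::Vector3d mean = sum / n;
      const Eigen::Matrix3d covariance = sumSquared / n - mean * mean.transpose();
      Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> solver;
      solver.computeDirect(covariance, Eigen::DecompositionOptions::ComputeEigenvectors);
      assert(solver.eigenvalues()(0) < 1e-12);                          // zero fit error
      assert(std::abs(solver.eigenvectors().col(0).z()) > 1.0 - 1e-9);  // normal is +-UnitZ
      return 0;
    }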
+
+SlidingWindowPlaneExtractor::SlidingWindowPlaneExtractor(const SlidingWindowPlaneExtractorParameters& parameters,
+                                                         const ransac_plane_extractor::RansacPlaneExtractorParameters& ransacParameters)
+    : parameters_(parameters), ransacParameters_(ransacParameters) {}
+
+void SlidingWindowPlaneExtractor::runExtraction(const grid_map::GridMap& map, const std::string& layer_height) {
+  // Extract basic map information
+  map_ = &map;
+  elevationLayer_ = layer_height;
+  mapRows_ = map_->getSize()[0];
+  segmentedPlanesMap_.resolution = map_->getResolution();
+  map_->getPosition(Eigen::Vector2i(0, 0), segmentedPlanesMap_.mapOrigin);
+
+  // Initialize based on map size.
+  segmentedPlanesMap_.highestLabel = -1;
+  segmentedPlanesMap_.labelPlaneParameters.clear();
+  const auto& mapSize = map_->getSize();
+  binaryImagePatch_ = cv::Mat(mapSize(0), mapSize(1), CV_8U, 0.0);  // Zero initialize to set untouched pixels to not planar.
+  // Need a buffer of at least the linear size of the image. But no need to shrink if the buffer is already bigger.
+  const int linearMapSize = mapSize(0) * mapSize(1);
+  if (surfaceNormals_.size() < static_cast<size_t>(linearMapSize)) {
+    // assign() grows the buffer and zero-initializes it; reserve() alone would not change size().
+    surfaceNormals_.assign(linearMapSize, Eigen::Vector3d::Zero());
+  }
+
+  // Run
+  runSlidingWindowDetector();
+  runSegmentation();
+  extractPlaneParametersFromLabeledImage();
+
+  // Get the classification from the segmentation to account for unassigned points.
+  binaryImagePatch_ = segmentedPlanesMap_.labeledImage > 0;
+  binaryImagePatch_.setTo(1, binaryImagePatch_ == 255);
+}
+
+std::pair<Eigen::Vector3d, double> SlidingWindowPlaneExtractor::computeNormalAndErrorForWindow(const Eigen::MatrixXf& windowData) const {
+  // Gather surrounding data.
+  size_t nPoints = 0;
+  Eigen::Vector3d sum = Eigen::Vector3d::Zero();
+  Eigen::Matrix3d sumSquared = Eigen::Matrix3d::Zero();
+  for (int kernel_col = 0; kernel_col < parameters_.kernel_size; ++kernel_col) {
+    for (int kernel_row = 0; kernel_row < parameters_.kernel_size; ++kernel_row) {
+      float height = windowData(kernel_row, kernel_col);
+      if (!std::isfinite(height)) {
+        continue;
+      }
+      // No need to account for the map offset. We subtract the mean anyway.
+      Eigen::Vector3d point{-kernel_row * segmentedPlanesMap_.resolution, -kernel_col * segmentedPlanesMap_.resolution, height};
+      nPoints++;
+      sum += point;
+      sumSquared.noalias() += point * point.transpose();
+    }
+  }
+
+  if (nPoints < 3) {
+    // Not enough points to establish the normal direction
+    return {Eigen::Vector3d::UnitZ(), 1e30};
+  } else {
+    const Eigen::Vector3d mean = sum / nPoints;
+    return normalAndErrorFromCovariance(nPoints, mean, sumSquared);
+  }
+}
+
+bool SlidingWindowPlaneExtractor::isLocallyPlanar(const Eigen::Vector3d& localNormal, double meanSquaredError) const {
+  const double thresholdSquared = parameters_.plane_patch_error_threshold * parameters_.plane_patch_error_threshold;
+
+  // Dot product between the normal and Eigen::Vector3d::UnitZ()
+  const double normalDotProduct = localNormal.z();
+  return (meanSquaredError < thresholdSquared && normalDotProduct > parameters_.local_plane_inclination_threshold);
+}
+
+void SlidingWindowPlaneExtractor::runSlidingWindowDetector() {
+  grid_map::SlidingWindowIterator window_iterator(*map_, elevationLayer_, grid_map::SlidingWindowIterator::EdgeHandling::EMPTY,
+                                                  parameters_.kernel_size);
+  const int kernelMiddle = (parameters_.kernel_size - 1) / 2;
+
+  for (; !window_iterator.isPastEnd(); ++window_iterator) {
+    grid_map::Index index = *window_iterator;
+    Eigen::MatrixXf window_data = window_iterator.getData();
+    const auto middleValue = window_data(kernelMiddle, kernelMiddle);
+
+    if (!std::isfinite(middleValue)) {
+      binaryImagePatch_.at<uchar>(index.x(), index.y()) = false;
+    } else {
+      Eigen::Vector3d n;
+      double meanSquaredError;
+      std::tie(n, meanSquaredError) = computeNormalAndErrorForWindow(window_data);
+
+      surfaceNormals_[getLinearIndex(index.x(), index.y())] = n;
+      binaryImagePatch_.at<uchar>(index.x(), index.y()) = isLocallyPlanar(n, meanSquaredError);
+    }
+  }
+
+  // Opening filter
+  if (parameters_.planarity_opening_filter > 0) {
+    const int openingKernelSize = 2 * parameters_.planarity_opening_filter + 1;
+    const int openingKernelType = cv::MORPH_CROSS;
+    const auto kernel_ = cv::getStructuringElement(openingKernelType, cv::Size(openingKernelSize, openingKernelSize));
+    cv::morphologyEx(binaryImagePatch_, binaryImagePatch_, cv::MORPH_OPEN, kernel_, cv::Point(-1, -1), 1, cv::BORDER_REPLICATE);
+  }
+}
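In numbers, with the values shipped in config/parameters.yaml and assuming the parameter loader converts the inclination threshold from degrees to a dot-product bound, the local planarity test amounts to:

    #include <cassert>
    #include <cmath>

    int main() {
      const double planePatchErrorThreshold = 0.02;                       // [m] from parameters.yaml
      const double inclinationThreshold = std::cos(35.0 * M_PI / 180.0);  // ~0.819, from 35 deg
      // Example window fit: RMS error ~0.012 m, normal tilted ~26 deg from vertical.
      const double meanSquaredError = 1.5e-4;
      const double normalZ = 0.9;
      const bool locallyPlanar =
          meanSquaredError < planePatchErrorThreshold * planePatchErrorThreshold  // 1.5e-4 < 4e-4
          && normalZ > inclinationThreshold;                                      // 0.9 > 0.819
      assert(locallyPlanar);
      return 0;
    }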
+
+// Label cells according to the connected component they belong to.
+void SlidingWindowPlaneExtractor::runSegmentation() {
+  int numberOfLabel = cv::connectedComponents(binaryImagePatch_, segmentedPlanesMap_.labeledImage, parameters_.connectivity, CV_32S);
+  segmentedPlanesMap_.highestLabel = numberOfLabel - 1;  // Labels are [0, N-1]
+}
+
+void SlidingWindowPlaneExtractor::extractPlaneParametersFromLabeledImage() {
+  const int numberOfExtractedPlanesWithoutRefinement =
+      segmentedPlanesMap_.highestLabel;  // Make a local copy. The highestLabel is incremented inside the loop.
+
+  // Reserve a work vector that is reused between labels
+  pointsWithNormal_.reserve(segmentedPlanesMap_.labeledImage.rows * segmentedPlanesMap_.labeledImage.cols);
+
+  // Skip label 0. This is the background, i.e. the non-planar region.
+  for (int label = 1; label <= numberOfExtractedPlanesWithoutRefinement; ++label) {
+    computePlaneParametersForLabel(label, pointsWithNormal_);
+  }
+}
+
+void SlidingWindowPlaneExtractor::computePlaneParametersForLabel(int label,
+                                                                 std::vector<ransac_plane_extractor::PointWithNormal>& pointsWithNormal) {
+  const auto& elevationData = (*map_)[elevationLayer_];
+  pointsWithNormal.clear();  // clear the work vector
+
+  int numPoints = 0;
+  Eigen::Vector3d sum = Eigen::Vector3d::Zero();
+  Eigen::Matrix3d sumSquared = Eigen::Matrix3d::Zero();
+  for (int col = 0; col < segmentedPlanesMap_.labeledImage.cols; ++col) {
+    for (int row = 0; row < segmentedPlanesMap_.labeledImage.rows; ++row) {
+      if (segmentedPlanesMap_.labeledImage.at<int>(row, col) == label) {
+        double height = elevationData(row, col);
+        if (std::isfinite(height)) {
+          const Eigen::Vector3d point3d{segmentedPlanesMap_.mapOrigin.x() - row * segmentedPlanesMap_.resolution,
+                                        segmentedPlanesMap_.mapOrigin.y() - col * segmentedPlanesMap_.resolution, height};
+
+          ++numPoints;
+          sum += point3d;
+          sumSquared.noalias() += point3d * point3d.transpose();
+
+          const auto& localSurfaceNormal = surfaceNormals_[getLinearIndex(row, col)];
+          pointsWithNormal.emplace_back(
+              ransac_plane_extractor::Point3D(point3d.x(), point3d.y(), point3d.z()),
+              ransac_plane_extractor::Vector3D(localSurfaceNormal.x(), localSurfaceNormal.y(), localSurfaceNormal.z()));
+        }
+      }
+    }
+  }
+  if (numPoints < parameters_.min_number_points_per_label || numPoints < 3) {
+    // The label has too few points; no plane parameters are created.
+    return;
+  }
+
+  const Eigen::Vector3d supportVector = sum / numPoints;
+  const Eigen::Vector3d normalVector = normalAndErrorFromCovariance(numPoints, supportVector, sumSquared).first;
+
+  if (parameters_.include_ransac_refinement) {  // with RANSAC
+    if (isGloballyPlanar(normalVector, supportVector, pointsWithNormal)) {  // Already planar enough
+      if (isWithinInclinationLimit(normalVector)) {
+        segmentedPlanesMap_.labelPlaneParameters.emplace_back(label, NormalAndPosition{supportVector, normalVector});
+      } else {
+        setToBackground(label);
+      }
+    } else {
+      // Set the entire label to background, so unassigned points automatically end up in the background
+      setToBackground(label);
+      refineLabelWithRansac(label, pointsWithNormal);
+    }
+  } else {  // no RANSAC
+    if (isWithinInclinationLimit(normalVector)) {
+      segmentedPlanesMap_.labelPlaneParameters.emplace_back(label, NormalAndPosition{supportVector, normalVector});
+    } else {
+      setToBackground(label);
+    }
+  }
+}
+
+void SlidingWindowPlaneExtractor::refineLabelWithRansac(int label, std::vector<ransac_plane_extractor::PointWithNormal>& pointsWithNormal) {
+  // Fix the seed for each label to get deterministic behaviour
+  CGAL::get_default_random() = CGAL::Random(0);
+
+  // Run RANSAC
+  ransac_plane_extractor::RansacPlaneExtractor ransac_plane_extractor(ransacParameters_);
+  ransac_plane_extractor.detectPlanes(pointsWithNormal);
+  const auto& planes = ransac_plane_extractor.getDetectedPlanes();
+
+  bool reuseLabel = true;
+  for (const auto& plane : planes) {
+    const auto planeInfo = ransac_plane_extractor::RansacPlaneExtractor::getPlaneParameters(plane.get());
+    const auto& normalVector = planeInfo.first;
+    const auto& supportVector = planeInfo.second;
+
+    if (isWithinInclinationLimit(normalVector)) {
+      // Bookkeeping of labels: reuse the old label for the first plane
+      const int newLabel = (reuseLabel) ? label : ++segmentedPlanesMap_.highestLabel;
+      reuseLabel = false;
+
+      segmentedPlanesMap_.labelPlaneParameters.emplace_back(newLabel, NormalAndPosition{supportVector, normalVector});
+
+      // Assign the label in the segmentation
+      for (const auto index : plane->indices_of_assigned_points()) {
+        const auto& point = pointsWithNormal[index].first;
+
+        // Need to look up the indices in the map, because RANSAC has reordered the points
+        Eigen::Array2i map_indices;
+        map_->getIndex(Eigen::Vector2d(point.x(), point.y()), map_indices);
+        segmentedPlanesMap_.labeledImage.at<int>(map_indices(0), map_indices(1)) = newLabel;
+      }
+    }
+  }
+}
+
+void SlidingWindowPlaneExtractor::addSurfaceNormalToMap(grid_map::GridMap& map, const std::string& layerPrefix) const {
+  map.add(layerPrefix + "_x");
+  map.add(layerPrefix + "_y");
+  map.add(layerPrefix + "_z");
+  auto& dataX = map.get(layerPrefix + "_x");
+  auto& dataY = map.get(layerPrefix + "_y");
+  auto& dataZ = map.get(layerPrefix + "_z");
+
+  grid_map::SlidingWindowIterator window_iterator(map, layerPrefix + "_x", grid_map::SlidingWindowIterator::EdgeHandling::EMPTY,
+                                                  parameters_.kernel_size);
+
+  for (; !window_iterator.isPastEnd(); ++window_iterator) {
+    grid_map::Index index = *window_iterator;
+    const auto& n = surfaceNormals_[getLinearIndex(index.x(), index.y())];
+    dataX(index.x(), index.y()) = n.x();
+    dataY(index.x(), index.y()) = n.y();
+    dataZ(index.x(), index.y()) = n.z();
+  }
+}
+
+bool SlidingWindowPlaneExtractor::isGloballyPlanar(const Eigen::Vector3d& normalVectorPlane, const Eigen::Vector3d& supportVectorPlane,
+                                                   const std::vector<ransac_plane_extractor::PointWithNormal>& pointsWithNormal) const {
+  // The part of the plane projection that is independent of the point
+  const double normalDotSupportvector = normalVectorPlane.dot(supportVectorPlane);
+
+  // Convert the threshold in degrees to a threshold on the dot product (between normalized vectors)
+  const double dotProductThreshold = std::cos(parameters_.global_plane_fit_angle_error_threshold_degrees * M_PI / 180.0);
+
+  for (const auto& pointWithNormal : pointsWithNormal) {
+    const double normalDotPoint = normalVectorPlane.x() * pointWithNormal.first.x() + normalVectorPlane.y() * pointWithNormal.first.y() +
+                                  normalVectorPlane.z() * pointWithNormal.first.z();
+    const double distanceError = std::abs(normalDotPoint - normalDotSupportvector);
+
+    const double dotProductNormals = normalVectorPlane.x() * pointWithNormal.second.x() +
+                                     normalVectorPlane.y() * pointWithNormal.second.y() +
+                                     normalVectorPlane.z() * pointWithNormal.second.z();
+
+    if (distanceError > parameters_.global_plane_fit_distance_error_threshold || dotProductNormals < dotProductThreshold) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+bool SlidingWindowPlaneExtractor::isWithinInclinationLimit(const Eigen::Vector3d& normalVectorPlane) const {
+  return normalVectorPlane.z() > parameters_.plane_inclination_threshold;
+}
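The two rejection tests in isGloballyPlanar in numbers; a small self-contained sketch with threshold values assumed from config/parameters.yaml:

    #include <cassert>
    #include <cmath>

    #include <Eigen/Dense>

    int main() {
      const Eigen::Vector3d planeNormal = Eigen::Vector3d::UnitZ();
      const Eigen::Vector3d support(0.0, 0.0, 1.0);
      const double distanceThreshold = 0.025;                     // [m] global_plane_fit_distance_error_threshold
      const double dotThreshold = std::cos(25.0 * M_PI / 180.0);  // ~0.906, from 25 deg
      // A point 1 cm off the plane whose local normal is tilted by 10 deg passes both tests.
      const Eigen::Vector3d point(0.3, -0.2, 1.01);
      const Eigen::Vector3d pointNormal(std::sin(10.0 * M_PI / 180.0), 0.0, std::cos(10.0 * M_PI / 180.0));
      const double distanceError = std::abs(planeNormal.dot(point) - planeNormal.dot(support));  // = 0.01
      assert(distanceError < distanceThreshold);
      assert(planeNormal.dot(pointNormal) > dotThreshold);  // cos(10 deg) ~ 0.985
      return 0;
    }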
+
+void SlidingWindowPlaneExtractor::setToBackground(int label) {
+  segmentedPlanesMap_.labeledImage.setTo(0, segmentedPlanesMap_.labeledImage == label);
+}
+
+}  // namespace sliding_window_plane_extractor
+}  // namespace convex_plane_decomposition
diff --git a/plane_segmentation/convex_plane_decomposition/test/data/terrain.png b/plane_segmentation/convex_plane_decomposition/test/data/terrain.png
new file mode 100644
index 00000000..65cfe247
Binary files /dev/null and b/plane_segmentation/convex_plane_decomposition/test/data/terrain.png differ
diff --git a/plane_segmentation/convex_plane_decomposition/test/testConvexApproximation.cpp b/plane_segmentation/convex_plane_decomposition/test/testConvexApproximation.cpp
new file mode 100644
index 00000000..78e80256
--- /dev/null
+++ b/plane_segmentation/convex_plane_decomposition/test/testConvexApproximation.cpp
@@ -0,0 +1,101 @@
+//
+// Created by rgrandia on 08.06.22.
+//
+
+#include
+
+#include
+
+#include
+
+#include "convex_plane_decomposition/ConvexRegionGrowing.h"
+#include "convex_plane_decomposition/GeometryUtils.h"
+#include "convex_plane_decomposition/LoadGridmapFromImage.h"
+#include "convex_plane_decomposition/PlaneDecompositionPipeline.h"
+#include "convex_plane_decomposition/SegmentedPlaneProjection.h"
+
+using namespace convex_plane_decomposition;
+
+namespace {
+
+/**
+ * Brute-force version of getBestPlanarRegionAtPositionInWorld that searches through all candidates without shortcuts
+ */
+PlanarTerrainProjection getBestPlanarRegionAtPositionInWorldNaive(const Eigen::Vector3d& positionInWorld,
+                                                                  const std::vector<PlanarRegion>& planarRegions,
+                                                                  const std::function<double(const Eigen::Vector3d&)>& penaltyFunction) {
+  // Do the full projection per region first
+  std::vector<PlanarTerrainProjection> individualProjections;
+  std::for_each(planarRegions.begin(), planarRegions.end(), [&](const PlanarRegion& planarRegion) {
+    const Eigen::Vector3d positionInTerrainFrame = planarRegion.transformPlaneToWorld.inverse() * positionInWorld;
+
+    PlanarTerrainProjection projection;
+    projection.regionPtr = &planarRegion;
+    projection.positionInTerrainFrame = projectToPlanarRegion({positionInTerrainFrame.x(), positionInTerrainFrame.y()}, planarRegion);
+    projection.positionInWorld =
+        positionInWorldFrameFromPosition2dInPlane(projection.positionInTerrainFrame, planarRegion.transformPlaneToWorld);
+    projection.cost = (positionInWorld - projection.positionInWorld).squaredNorm() + penaltyFunction(projection.positionInWorld);
+    individualProjections.push_back(projection);
+  });
+
+  // Find the minimum cost projection
+  return *std::min_element(individualProjections.begin(), individualProjections.end(),
+                           [](const PlanarTerrainProjection& lhs, const PlanarTerrainProjection& rhs) { return lhs.cost < rhs.cost; });
+}
+}  // namespace
+
+TEST(TestConvexApproximation, runOnDemoMap) {
+  // Config
+  PlaneDecompositionPipeline::Config config;
+  const auto resolution = config.preprocessingParameters.resolution;
+  const std::string elevationLayer{"elevation_test"};
+  const std::string frameId{"odom_test"};
+  const Eigen::Array2d submapSize(3.0, 3.0);
+  std::string file = "terrain.png";
+  double heightScale = 1.25;
+
+  // Terrain Loading
+  boost::filesystem::path filePath(__FILE__);
+  std::string folder = filePath.parent_path().generic_string() + std::string{"/data/"};
+  const auto loadedMap = loadGridmapFromImage(folder + file, elevationLayer, frameId, resolution, heightScale);
+  bool success = false;
+  auto elevationMap = loadedMap.getSubmap(loadedMap.getPosition(), submapSize, success);
+  ASSERT_TRUE(success);
+
+  // Run pipeline.
+ PlaneDecompositionPipeline pipeline(config); + pipeline.update(std::move(elevationMap), elevationLayer); + const auto& planarTerrain = pipeline.getPlanarTerrain(); + + // Query a range of points + for (double x = -submapSize.x() / 2.0; x < submapSize.x() / 2.0; x += submapSize.x() / 4.0) { + for (double y = -submapSize.y() / 2.0; y < submapSize.y() / 2.0; y += submapSize.y() / 4.0) { + for (double z = -heightScale; z < heightScale; z += heightScale / 2.0) { + Eigen::Vector3d query{x, y, z}; + auto penaltyFunction = [](const Eigen::Vector3d& projectedPoint) { return 0.1 * std::abs(projectedPoint.z()); }; + + // Run projection and naive projection + const auto projection = getBestPlanarRegionAtPositionInWorld(query, planarTerrain.planarRegions, penaltyFunction); + const auto projectionCheck = getBestPlanarRegionAtPositionInWorldNaive(query, planarTerrain.planarRegions, penaltyFunction); + + // Check they are the same + ASSERT_EQ(projection.regionPtr, projectionCheck.regionPtr); + ASSERT_DOUBLE_EQ(projection.cost, projectionCheck.cost); + ASSERT_DOUBLE_EQ(projection.positionInTerrainFrame.x(), projectionCheck.positionInTerrainFrame.x()); + ASSERT_DOUBLE_EQ(projection.positionInTerrainFrame.y(), projectionCheck.positionInTerrainFrame.y()); + ASSERT_TRUE(projection.positionInWorld.isApprox(projectionCheck.positionInWorld)); + + // Check convex approximation with a range of settings + for (int numberOfVertices = 3; numberOfVertices < 7; ++numberOfVertices) { + for (double growthFactor = 1.01; growthFactor < 1.3; growthFactor += 0.1) { + const auto convexRegion = convex_plane_decomposition::growConvexPolygonInsideShape( + projection.regionPtr->boundaryWithInset.boundary, projection.positionInTerrainFrame, numberOfVertices, growthFactor); + + ASSERT_TRUE(std::all_of(convexRegion.vertices_begin(), convexRegion.vertices_end(), + [&](const CgalPoint2d& p) { return isInside(p, projection.regionPtr->boundaryWithInset.boundary); })); + } + } + } + } + } +} diff --git a/plane_segmentation/convex_plane_decomposition/test/testPipeline.cpp b/plane_segmentation/convex_plane_decomposition/test/testPipeline.cpp new file mode 100644 index 00000000..50e6df78 --- /dev/null +++ b/plane_segmentation/convex_plane_decomposition/test/testPipeline.cpp @@ -0,0 +1,35 @@ +// +// Created by rgrandia on 16.03.22. 
+// + +#include + +#include + +#include "convex_plane_decomposition/LoadGridmapFromImage.h" +#include "convex_plane_decomposition/PlaneDecompositionPipeline.h" + +using namespace convex_plane_decomposition; + +TEST(TestPipeline, runOnDemoMap) { + // Config + PlaneDecompositionPipeline::Config config; + const auto resolution = config.preprocessingParameters.resolution; + const std::string elevationLayer{"elevation_test"}; + const std::string frameId{"odom_test"}; + const Eigen::Array2d submapSize(3.0, 3.0); + std::string file = "terrain.png"; + double heightScale = 1.25; + + // Terrain Loading + boost::filesystem::path filePath(__FILE__); + std::string folder = filePath.parent_path().generic_string() + std::string{"/data/"}; + const auto loadedMap = loadGridmapFromImage(folder + file, elevationLayer, frameId, resolution, heightScale); + bool success = false; + auto elevationMap = loadedMap.getSubmap(loadedMap.getPosition(), submapSize, success); + ASSERT_TRUE(success); + + // Run + PlaneDecompositionPipeline pipeline(config); + ASSERT_NO_THROW(pipeline.update(std::move(elevationMap), elevationLayer)); +} diff --git a/plane_segmentation/convex_plane_decomposition/test/testPlanarRegion.cpp b/plane_segmentation/convex_plane_decomposition/test/testPlanarRegion.cpp new file mode 100644 index 00000000..3b33818e --- /dev/null +++ b/plane_segmentation/convex_plane_decomposition/test/testPlanarRegion.cpp @@ -0,0 +1,85 @@ +// +// Created by rgrandia on 15.03.22. +// + +#include + +#include "convex_plane_decomposition/PlanarRegion.h" + +using namespace convex_plane_decomposition; + +TEST(TestPlanarRegion, getTransformFromNormalAndPosition_identity) { + NormalAndPosition normalAndPosition = {Eigen::Vector3d::Zero(), Eigen::Vector3d::UnitZ()}; + const auto transform = getTransformLocalToGlobal(normalAndPosition); + + // Orientation + ASSERT_TRUE(transform.linear().isApprox(Eigen::Matrix3d::Identity())); + + // Position + ASSERT_TRUE(transform.translation().isApprox(normalAndPosition.position)); +} + +TEST(TestPlanarRegion, getTransformFromNormalAndPosition_random) { + NormalAndPosition normalAndPosition = {Eigen::Vector3d(0.1, 0.2, 0.3), Eigen::Vector3d(0.4, 0.5, 0.6).normalized()}; + const auto transform = getTransformLocalToGlobal(normalAndPosition); + + const Eigen::Vector3d xAxisInWorld = transform.linear().col(0); + const Eigen::Vector3d yAxisInWorld = transform.linear().col(1); + const Eigen::Vector3d zAxisInWorld = transform.linear().col(2); + + // Z direction + ASSERT_TRUE(zAxisInWorld.isApprox(normalAndPosition.normal)); + + // Orthogonal + ASSERT_LT(std::abs(zAxisInWorld.dot(yAxisInWorld)), 1e-12); + ASSERT_LT(std::abs(zAxisInWorld.dot(xAxisInWorld)), 1e-12); + + // Scale + ASSERT_DOUBLE_EQ(xAxisInWorld.norm(), 1.0); + ASSERT_DOUBLE_EQ(yAxisInWorld.norm(), 1.0); + + // Position + ASSERT_TRUE(transform.translation().isApprox(normalAndPosition.position)); +} + +TEST(TestPlanarRegion, getTransformFromNormalAndPosition_unitX) { + NormalAndPosition normalAndPosition = {Eigen::Vector3d(0.1, 0.2, 0.3), Eigen::Vector3d::UnitX()}; + const auto transform = getTransformLocalToGlobal(normalAndPosition); + + const Eigen::Vector3d xAxisInWorld = transform.linear().col(0); + const Eigen::Vector3d yAxisInWorld = transform.linear().col(1); + const Eigen::Vector3d zAxisInWorld = transform.linear().col(2); + + // Z direction + ASSERT_TRUE(zAxisInWorld.isApprox(normalAndPosition.normal)); + + // XY + ASSERT_TRUE(xAxisInWorld.isApprox(-Eigen::Vector3d::UnitZ())); + 
ASSERT_TRUE(yAxisInWorld.isApprox(Eigen::Vector3d::UnitY()));
+
+  // Position
+  ASSERT_TRUE(transform.translation().isApprox(normalAndPosition.position));
+}
+
+TEST(TestPlanarRegion, projectPositionInWorldOntoPlaneAlongGravity) {
+  const NormalAndPosition normalAndPosition = {Eigen::Vector3d(0.1, 0.2, 0.3), Eigen::Vector3d(0.4, 0.5, 0.6).normalized()};
+  const auto transformPlaneToWorld = getTransformLocalToGlobal(normalAndPosition);
+
+  // The origin of the plane is projected to (0, 0)
+  const CgalPoint2d originXY = {normalAndPosition.position.x(), normalAndPosition.position.y()};
+  const CgalPoint2d originInPlane = projectToPlaneAlongGravity(originXY, transformPlaneToWorld);
+  ASSERT_LT(std::abs(originInPlane.x()), 1e-12);
+  ASSERT_LT(std::abs(originInPlane.y()), 1e-12);
+
+  // Random point projected
+  const Eigen::Vector2d queryPosition = Eigen::Vector2d::Random();
+  const CgalPoint2d projectedPositionInPlane = projectToPlaneAlongGravity({queryPosition.x(), queryPosition.y()}, transformPlaneToWorld);
+
+  // Convert back to world to check
+  Eigen::Vector3d projectedPositionInPlane3d = {projectedPositionInPlane.x(), projectedPositionInPlane.y(), 0.0};
+  Eigen::Vector3d projectedPositionInWorld3d = transformPlaneToWorld * projectedPositionInPlane3d;
+
+  // The x, y position remains the same
+  ASSERT_DOUBLE_EQ(projectedPositionInWorld3d.x(), queryPosition.x());
+  ASSERT_DOUBLE_EQ(projectedPositionInWorld3d.y(), queryPosition.y());
+}
\ No newline at end of file
diff --git a/plane_segmentation/convex_plane_decomposition/test/testRegionGrowing.cpp b/plane_segmentation/convex_plane_decomposition/test/testRegionGrowing.cpp
new file mode 100644
index 00000000..dca141a4
--- /dev/null
+++ b/plane_segmentation/convex_plane_decomposition/test/testRegionGrowing.cpp
@@ -0,0 +1,69 @@
+//
+// Created by rgrandia on 02.02.22.
+//
+
+
+#include
+
+#include "convex_plane_decomposition/ConvexRegionGrowing.h"
+
+#include
+
+using namespace convex_plane_decomposition;
+
+TEST(TestRegionGrowing, center_on_border) {
+  // Rare case where the region growing algorithm got stuck
+  const int numberOfVertices = 16;  // A multiple of 4 is nice for symmetry.
+ const double growthFactor = 1.05; + CgalPoint2d center(0.0, 1.0); + + CgalPolygonWithHoles2d parentShape; + parentShape.outer_boundary().container() = {{1, -1}, {1, 1}, {-1, 1}, {-1, -1}}; + + const auto convexInnerApprox = growConvexPolygonInsideShape(parentShape, center, numberOfVertices, growthFactor); + ASSERT_TRUE(convexInnerApprox.is_convex()); + for (auto it = convexInnerApprox.vertices_begin(); it!=convexInnerApprox.vertices_end(); ++it) { + ASSERT_TRUE(isInside(*it, parentShape)); + } +} + +TEST(TestRegionGrowing, debug_case) { + // Rare case where the region algorithm got stuck + const int numberOfVertices = 16; + const double growthFactor = 1.05; + CgalPoint2d center(-0.147433, 0.800114); + + CgalPolygonWithHoles2d parentShape; + parentShape.outer_boundary().container() = { + {1.03923, -0.946553}, {1.03923, 0.840114}, {0.7859, 0.840114}, {0.772567, 0.853447}, {0.759233, 0.853447}, + {0.7459, 0.86678}, {0.7459, 0.880114}, {0.732567, 0.893447}, {0.719233, 0.893447}, {0.7059, 0.90678}, + {0.7059, 1.05345}, {0.652567, 1.05345}, {0.652567, 0.90678}, {0.639233, 0.893447}, {0.6259, 0.893447}, + {0.612567, 0.880114}, {0.612567, 0.86678}, {0.599233, 0.853447}, {0.5859, 0.853447}, {0.572567, 0.840114}, + {0.532567, 0.840114}, {0.532567, 0.82678}, {0.519233, 0.813447}, {0.5059, 0.813447}, {0.492567, 0.800114}, + {0.3059, 0.800114}, {0.292567, 0.813447}, {0.279233, 0.813447}, {0.2659, 0.82678}, {0.2659, 0.840114}, + {0.252567, 0.853447}, {0.239233, 0.853447}, {0.2259, 0.86678}, {0.2259, 0.920114}, {0.212567, 0.933447}, + {0.199233, 0.933447}, {0.1859, 0.94678}, {0.1859, 1.05345}, {0.132567, 1.05345}, {0.132567, 0.86678}, + {0.119233, 0.853447}, {0.1059, 0.853447}, {0.0925666, 0.840114}, {0.0925666, 0.82678}, {0.0792332, 0.813447}, + {0.0658999, 0.813447}, {0.0525666, 0.800114}, {-0.1341, 0.800114}, {-0.147433, 0.813447}, {-0.160767, 0.813447}, + {-0.1741, 0.82678}, {-0.1741, 0.840114}, {-0.2141, 0.840114}, {-0.227433, 0.853447}, {-0.240767, 0.853447}, + {-0.2541, 0.86678}, {-0.2541, 0.880114}, {-0.267433, 0.893447}, {-0.280767, 0.893447}, {-0.2941, 0.90678}, + {-0.2941, 1.05345}, {-0.960767, 1.05345}, {-0.960767, -0.946553}}; + + CgalPolygon2d hole; + hole.container() = {{0.5459, -0.266553}, {0.532566, -0.279886}, {0.3059, -0.279886}, {0.292566, -0.266553}, {0.279233, -0.266553}, + {0.2659, -0.25322}, {0.2659, -0.239886}, {0.252566, -0.226553}, {0.239233, -0.226553}, {0.2259, -0.21322}, + {0.2259, 0.320114}, {0.239233, 0.333447}, {0.252566, 0.333447}, {0.2659, 0.34678}, {0.532567, 0.34678}, + {0.5459, 0.333447}, {0.559233, 0.333447}, {0.572567, 0.320114}, {0.572567, 0.30678}, {0.5859, 0.293447}, + {0.599233, 0.293447}, {0.612567, 0.280114}, {0.612566, 0.0667803}, {0.6259, 0.053447}, {0.639233, 0.053447}, + {0.652566, 0.0401136}, {0.652566, -0.17322}, {0.639233, -0.186553}, {0.6259, -0.186553}, {0.612566, -0.199886}, + {0.612566, -0.21322}, {0.599233, -0.226553}, {0.5859, -0.226553}, {0.572566, -0.239886}, {0.572566, -0.25322}, + {0.559233, -0.266553}}; + parentShape.holes().push_back(std::move(hole)); + + const auto convexInnerApprox = growConvexPolygonInsideShape(parentShape, center, numberOfVertices, growthFactor); + + ASSERT_TRUE(convexInnerApprox.is_convex()); + for (auto it = convexInnerApprox.vertices_begin(); it!=convexInnerApprox.vertices_end(); ++it) { + ASSERT_TRUE(isInside(*it, parentShape)); + } +} \ No newline at end of file diff --git a/plane_segmentation/convex_plane_decomposition/test/testUpsampling.cpp 
b/plane_segmentation/convex_plane_decomposition/test/testUpsampling.cpp
new file mode 100644
index 00000000..06538b10
--- /dev/null
+++ b/plane_segmentation/convex_plane_decomposition/test/testUpsampling.cpp
@@ -0,0 +1,28 @@
+//
+// Created by rgrandia on 26.10.21.
+//
+
+#include
+
+#include "convex_plane_decomposition/contour_extraction/Upsampling.h"
+
+TEST(TestUpsampling, upsampleImage) {
+  // clang-format off
+  cv::Mat M = (cv::Mat_<int>(3, 3) <<
+      1, 2, 3,
+      4, 5, 6,
+      7, 8, 9);
+  cv::Mat MoutCheck = (cv::Mat_<int>(7, 7) <<
+      1, 1, 2, 2, 2, 3, 3,
+      1, 1, 2, 2, 2, 3, 3,
+      4, 4, 5, 5, 5, 6, 6,
+      4, 4, 5, 5, 5, 6, 6,
+      4, 4, 5, 5, 5, 6, 6,
+      7, 7, 8, 8, 8, 9, 9,
+      7, 7, 8, 8, 8, 9, 9);
+  // clang-format on
+
+  const auto Mout = convex_plane_decomposition::contour_extraction::upSample(M);
+
+  ASSERT_TRUE(std::equal(MoutCheck.begin<int>(), MoutCheck.end<int>(), Mout.begin<int>()));
+}
\ No newline at end of file
diff --git a/plane_segmentation/convex_plane_decomposition_msgs/CMakeLists.txt b/plane_segmentation/convex_plane_decomposition_msgs/CMakeLists.txt
new file mode 100644
index 00000000..5c25b7a6
--- /dev/null
+++ b/plane_segmentation/convex_plane_decomposition_msgs/CMakeLists.txt
@@ -0,0 +1,25 @@
+cmake_minimum_required(VERSION 3.5)
+project(convex_plane_decomposition_msgs)
+
+# Find ament and necessary packages
+find_package(ament_cmake REQUIRED)
+find_package(rosidl_default_generators REQUIRED)
+find_package(std_msgs REQUIRED)
+find_package(geometry_msgs REQUIRED)
+find_package(grid_map_msgs REQUIRED)
+
+# Add message files for ROS2
+rosidl_generate_interfaces(${PROJECT_NAME}
+  "msg/BoundingBox2d.msg"
+  "msg/PlanarRegion.msg"
+  "msg/PlanarTerrain.msg"
+  "msg/Point2d.msg"
+  "msg/Polygon2d.msg"
+  "msg/PolygonWithHoles2d.msg"
+  DEPENDENCIES std_msgs geometry_msgs grid_map_msgs
+)
+
+# Replace catkin_package with ament_package
+ament_package(
+  # ... existing export settings if any ...
+)
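Downstream packages consume these interfaces through the usual rosidl conventions; a minimal subscriber sketch (the topic name and node wiring are illustrative, the header name follows the standard snake_case mapping):

    #include "convex_plane_decomposition_msgs/msg/planar_terrain.hpp"
    #include "rclcpp/rclcpp.hpp"

    class TerrainListener : public rclcpp::Node {
     public:
      TerrainListener() : rclcpp::Node("terrain_listener") {
        subscription_ = create_subscription<convex_plane_decomposition_msgs::msg::PlanarTerrain>(
            "planar_terrain", 1,  // illustrative topic name
            [this](const convex_plane_decomposition_msgs::msg::PlanarTerrain::SharedPtr msg) {
              RCLCPP_INFO(get_logger(), "Received %zu planar regions", msg->planar_regions.size());
            });
      }

     private:
      rclcpp::Subscription<convex_plane_decomposition_msgs::msg::PlanarTerrain>::SharedPtr subscription_;
    };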
diff --git a/plane_segmentation/convex_plane_decomposition_msgs/msg/BoundingBox2d.msg b/plane_segmentation/convex_plane_decomposition_msgs/msg/BoundingBox2d.msg
new file mode 100644
index 00000000..2327348a
--- /dev/null
+++ b/plane_segmentation/convex_plane_decomposition_msgs/msg/BoundingBox2d.msg
@@ -0,0 +1,5 @@
+# 2D, axis-aligned bounding box
+float32 min_x
+float32 min_y
+float32 max_x
+float32 max_y
\ No newline at end of file
diff --git a/plane_segmentation/convex_plane_decomposition_msgs/msg/PlanarRegion.msg b/plane_segmentation/convex_plane_decomposition_msgs/msg/PlanarRegion.msg
new file mode 100644
index 00000000..d93d2478
--- /dev/null
+++ b/plane_segmentation/convex_plane_decomposition_msgs/msg/PlanarRegion.msg
@@ -0,0 +1,7 @@
+geometry_msgs/Pose plane_parameters
+
+BoundingBox2d bbox2d
+
+PolygonWithHoles2d boundary
+
+PolygonWithHoles2d[] insets
diff --git a/plane_segmentation/convex_plane_decomposition_msgs/msg/PlanarTerrain.msg b/plane_segmentation/convex_plane_decomposition_msgs/msg/PlanarTerrain.msg
new file mode 100644
index 00000000..3b8f895e
--- /dev/null
+++ b/plane_segmentation/convex_plane_decomposition_msgs/msg/PlanarTerrain.msg
@@ -0,0 +1,2 @@
+PlanarRegion[] planar_regions
+grid_map_msgs/GridMap gridmap
\ No newline at end of file
diff --git a/plane_segmentation/convex_plane_decomposition_msgs/msg/Point2d.msg b/plane_segmentation/convex_plane_decomposition_msgs/msg/Point2d.msg
new file mode 100644
index 00000000..220abf75
--- /dev/null
+++ b/plane_segmentation/convex_plane_decomposition_msgs/msg/Point2d.msg
@@ -0,0 +1,2 @@
+float32 x
+float32 y
\ No newline at end of file
diff --git a/plane_segmentation/convex_plane_decomposition_msgs/msg/Polygon2d.msg b/plane_segmentation/convex_plane_decomposition_msgs/msg/Polygon2d.msg
new file mode 100644
index 00000000..e52128c5
--- /dev/null
+++ b/plane_segmentation/convex_plane_decomposition_msgs/msg/Polygon2d.msg
@@ -0,0 +1,2 @@
+# Specification of a 2D polygon where the first and last points are connected
+Point2d[] points
\ No newline at end of file
diff --git a/plane_segmentation/convex_plane_decomposition_msgs/msg/PolygonWithHoles2d.msg b/plane_segmentation/convex_plane_decomposition_msgs/msg/PolygonWithHoles2d.msg
new file mode 100644
index 00000000..8513f45c
--- /dev/null
+++ b/plane_segmentation/convex_plane_decomposition_msgs/msg/PolygonWithHoles2d.msg
@@ -0,0 +1,3 @@
+# Specification of a 2D polygon with holes
+Polygon2d outer_boundary
+Polygon2d[] holes
\ No newline at end of file
diff --git a/plane_segmentation/convex_plane_decomposition_msgs/package.xml b/plane_segmentation/convex_plane_decomposition_msgs/package.xml
new file mode 100644
index 00000000..c6672ab9
--- /dev/null
+++ b/plane_segmentation/convex_plane_decomposition_msgs/package.xml
@@ -0,0 +1,25 @@
+
+
+
+  convex_plane_decomposition_msgs
+  0.0.1
+
+  Messages for Convex Plane Decomposition.
+ + Lorenzo Terenzi + Apache-2.0 + + ament_cmake + rosidl_default_generators + rosidl_runtime_cpp + std_msgs + geometry_msgs + grid_map_msgs + + + rosidl_interface_packages + + + ament_cmake + + \ No newline at end of file diff --git a/plane_segmentation/convex_plane_decomposition_ros/CMakeLists.txt b/plane_segmentation/convex_plane_decomposition_ros/CMakeLists.txt new file mode 100644 index 00000000..f94b85c6 --- /dev/null +++ b/plane_segmentation/convex_plane_decomposition_ros/CMakeLists.txt @@ -0,0 +1,129 @@ +cmake_minimum_required(VERSION 3.10) +project(convex_plane_decomposition_ros) + +# Catkin dependencies +set(CATKIN_PACKAGE_DEPENDENCIES + roscpp + grid_map_core + grid_map_ros + grid_map_cv + grid_map_msgs + geometry_msgs + convex_plane_decomposition + convex_plane_decomposition_msgs + tf2_ros + visualization_msgs +) + +find_package(catkin REQUIRED COMPONENTS + ${CATKIN_PACKAGE_DEPENDENCIES} +) + +# OpenCv +find_package(OpenCV REQUIRED) + +# Eigen +find_package(Eigen3 3.3 REQUIRED NO_MODULE) + +# Cpp standard version +set(CMAKE_CXX_STANDARD 14) +set(CMAKE_CXX_STANDARD_REQUIRED ON) + +################################### +## catkin specific configuration ## +################################### +catkin_package( + INCLUDE_DIRS include ${EIGEN3_INCLUDE_DIRS} + LIBRARIES ${PROJECT_NAME} + CATKIN_DEPENDS ${CATKIN_PACKAGE_DEPENDENCIES} + DEPENDS OpenCV +) + +########### +## Build ## +########### + +include_directories( + include + ${EIGEN3_INCLUDE_DIRS} + ${catkin_INCLUDE_DIRS} +) + +add_library(${PROJECT_NAME} + src/ConvexPlaneDecompositionRos.cpp + src/MessageConversion.cpp + src/ParameterLoading.cpp + src/RosVisualizations.cpp + ) +add_dependencies(${PROJECT_NAME} + ${catkin_EXPORTED_TARGETS} + ) +target_link_libraries(${PROJECT_NAME} + ${catkin_LIBRARIES} + ${OpenCV_LIBRARIES} + ) + +add_executable(${PROJECT_NAME}_node + src/ConvexPlaneDecompositionNode.cpp +) +target_link_libraries(${PROJECT_NAME}_node + ${catkin_LIBRARIES} + ${OpenCV_LIBRARIES} + ${PROJECT_NAME} +) + +add_executable(${PROJECT_NAME}_add_noise + src/noiseNode.cpp +) +target_link_libraries(${PROJECT_NAME}_add_noise + ${catkin_LIBRARIES} +) + +add_executable(${PROJECT_NAME}_save_elevationmap + src/SaveElevationMapAsImageNode.cpp + ) +target_link_libraries(${PROJECT_NAME}_save_elevationmap + ${catkin_LIBRARIES} + ${OpenCV_LIBRARIES} + ) + +add_executable(${PROJECT_NAME}_approximation_demo_node + src/ConvexApproximationDemoNode.cpp + ) +target_link_libraries(${PROJECT_NAME}_approximation_demo_node + ${catkin_LIBRARIES} + ${OpenCV_LIBRARIES} + ${PROJECT_NAME} + ) + +############# +## Install ## +############# + +install(TARGETS ${PROJECT_NAME} + ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} + LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} + RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} + ) +install(DIRECTORY include/${PROJECT_NAME}/ + DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} + ) + +install(TARGETS ${PROJECT_NAME}_node + RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} +) + +install(DIRECTORY config data launch rviz + DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} +) + +############# +## Testing ## +############# +add_executable(${PROJECT_NAME}_TestShapeGrowing + test/TestShapeGrowing.cpp +) +target_link_libraries(${PROJECT_NAME}_TestShapeGrowing + ${catkin_LIBRARIES} + ${OpenCV_LIBRARIES} +) \ No newline at end of file diff --git a/plane_segmentation/convex_plane_decomposition_ros/config/demo_node.yaml b/plane_segmentation/convex_plane_decomposition_ros/config/demo_node.yaml new file 
mode 100644
index 00000000..6ded383e
--- /dev/null
+++ b/plane_segmentation/convex_plane_decomposition_ros/config/demo_node.yaml
@@ -0,0 +1,8 @@
+elevation_topic: '/elevation_mapping/elevation_map_raw'
+height_layer: 'elevation'
+target_frame_id: 'odom'
+submap:
+  width: 8.0
+  length: 8.0
+publish_to_controller: true
+frequency: 1.0
diff --git a/plane_segmentation/convex_plane_decomposition_ros/config/node.yaml b/plane_segmentation/convex_plane_decomposition_ros/config/node.yaml
new file mode 100644
index 00000000..68790760
--- /dev/null
+++ b/plane_segmentation/convex_plane_decomposition_ros/config/node.yaml
@@ -0,0 +1,8 @@
+elevation_topic: '/elevation_mapping/elevation_map_raw'
+height_layer: 'elevation'
+target_frame_id: 'odom'
+submap:
+  width: 3.0
+  length: 3.0
+publish_to_controller: true
+frequency: 20.0
diff --git a/plane_segmentation/convex_plane_decomposition_ros/config/parameters.yaml b/plane_segmentation/convex_plane_decomposition_ros/config/parameters.yaml
new file mode 100644
index 00000000..fa153584
--- /dev/null
+++ b/plane_segmentation/convex_plane_decomposition_ros/config/parameters.yaml
@@ -0,0 +1,34 @@
+preprocessing:
+  resolution: 0.04       # Resampling resolution; set negative to skip. Requires inpainting to be used.
+  kernelSize: 3          # Kernel size of the median filter, either 3 or 5
+  numberOfRepeats: 1     # Number of times to apply the same filter
+
+sliding_window_plane_extractor:
+  kernel_size: 3                                        # Size of the sliding window patch used for normal vector calculation and planarity detection. Should be an odd number and at least 3.
+  planarity_opening_filter: 0                           # [#] Apply an opening filter (erosion -> dilation) to the planarity detection by this amount of pixels
+  plane_inclination_threshold_degrees: 30.0             # [deg] Maximum allowed angle between the surface normal and the world-z direction for a patch
+  local_plane_inclination_threshold_degrees: 35.0       # [deg] Maximum allowed angle between the surface normal and the world-z direction for a cell
+  plane_patch_error_threshold: 0.02                     # [m] The allowed root-mean-squared deviation from the plane fitted to the sliding window
+  min_number_points_per_label: 4                        # [#] Labels with fewer points assigned to them are discarded
+  connectivity: 4                                       # Label kernel connectivity, 4 or 8 (cross or box)
+  include_ransac_refinement: true                       # Enable RANSAC refinement if the plane does not globally fit the assigned points
+  global_plane_fit_distance_error_threshold: 0.025      # [m] Allowed maximum distance from a point to the plane. If any point violates this, RANSAC is triggered
+  global_plane_fit_angle_error_threshold_degrees: 25.0  # [deg] Allowed normal vector deviation for a point w.r.t. the plane normal. If any point violates this, RANSAC is triggered
+
+ransac_plane_refinement:
+  probability: 0.001      # Probability to miss the largest candidate shape. A lower probability provides higher reliability and determinism at the cost of longer running times
+  min_points: 4           # The minimum number of points per plane. Controls the termination of the algorithm
+  epsilon: 0.025          # [m] Maximum distance to the plane
+  cluster_epsilon: 0.041  # [m] Maximum Euclidean distance between points to be clustered. Two points are connected if separated by a distance of at most 2*sqrt(2)*cluster_epsilon = 2.828 * cluster_epsilon. Set this higher than the resolution
+  normal_threshold: 25.0  # [deg] Maximum normal deviation between the cluster surface normal and the point normal
+
+contour_extraction:
+  marginSize: 1           # Size of the kernel creating the boundary offset. In number of (sub)pixels.
+ +postprocessing: + extracted_planes_height_offset: 0.0 # Added offset in Z direction for the full map to compensate for the location of the foot frame w.r.t. the contact point + nonplanar_height_offset: 0.02 # Added offset in Z direction for non-planar cells of the map. (Additional to extracted_planes_height_offset) + nonplanar_horizontal_offset: 1 # Added offset in XY direction for non-planar cells of the map. In number of pixels. + smoothing_dilation_size: 0.2 # Half the width of the dilation used before the smooth layer [m] + smoothing_box_kernel_size: 0.1 # Half the width of the box kernel used for the smooth layer [m] + smoothing_gauss_kernel_size: 0.05 # Half the width of the Gaussian kernel used for the smooth layer [m] \ No newline at end of file diff --git a/plane_segmentation/convex_plane_decomposition_ros/data/holes.png b/plane_segmentation/convex_plane_decomposition_ros/data/holes.png new file mode 100644 index 00000000..f997bbe5 Binary files /dev/null and b/plane_segmentation/convex_plane_decomposition_ros/data/holes.png differ diff --git a/plane_segmentation/convex_plane_decomposition_ros/data/real_stairs_125cm.png b/plane_segmentation/convex_plane_decomposition_ros/data/real_stairs_125cm.png new file mode 100644 index 00000000..9268d492 Binary files /dev/null and b/plane_segmentation/convex_plane_decomposition_ros/data/real_stairs_125cm.png differ diff --git a/plane_segmentation/convex_plane_decomposition_ros/data/slope_1m_1m_20cm.png b/plane_segmentation/convex_plane_decomposition_ros/data/slope_1m_1m_20cm.png new file mode 100644 index 00000000..d7afe889 Binary files /dev/null and b/plane_segmentation/convex_plane_decomposition_ros/data/slope_1m_1m_20cm.png differ diff --git a/plane_segmentation/convex_plane_decomposition_ros/data/straight_stairs_1m_1m_60cm.png b/plane_segmentation/convex_plane_decomposition_ros/data/straight_stairs_1m_1m_60cm.png new file mode 100644 index 00000000..cf42ffd2 Binary files /dev/null and b/plane_segmentation/convex_plane_decomposition_ros/data/straight_stairs_1m_1m_60cm.png differ diff --git a/plane_segmentation/convex_plane_decomposition_ros/data/terrain.png b/plane_segmentation/convex_plane_decomposition_ros/data/terrain.png new file mode 100644 index 00000000..65cfe247 Binary files /dev/null and b/plane_segmentation/convex_plane_decomposition_ros/data/terrain.png differ diff --git a/plane_segmentation/convex_plane_decomposition_ros/include/convex_plane_decomposition_ros/ConvexPlaneDecompositionRos.h b/plane_segmentation/convex_plane_decomposition_ros/include/convex_plane_decomposition_ros/ConvexPlaneDecompositionRos.h new file mode 100644 index 00000000..d57f8d6d --- /dev/null +++ b/plane_segmentation/convex_plane_decomposition_ros/include/convex_plane_decomposition_ros/ConvexPlaneDecompositionRos.h @@ -0,0 +1,62 @@ +#pragma once + +#include +#include + +#include +#include +#include + +#include + +#include + +#include + +namespace convex_plane_decomposition { + +// Forward declaration of the pipeline +class PlaneDecompositionPipeline; + +class ConvexPlaneExtractionROS { + public: + ConvexPlaneExtractionROS(ros::NodeHandle& nodeHandle); + + ~ConvexPlaneExtractionROS(); + + private: + bool loadParameters(const ros::NodeHandle& nodeHandle); + + /** + * Callback method for the incoming grid map message. + * @param message the incoming message. 
+ */ + void callback(const grid_map_msgs::GridMap& message); + + Eigen::Isometry3d getTransformToTargetFrame(const std::string& sourceFrame, const ros::Time& time); + + // Parameters + std::string elevationMapTopic_; + std::string elevationLayer_; + std::string targetFrameId_; + double subMapWidth_; + double subMapLength_; + bool publishToController_; + + // ROS communication + ros::Subscriber elevationMapSubscriber_; + ros::Publisher filteredmapPublisher_; + ros::Publisher boundaryPublisher_; + ros::Publisher insetPublisher_; + ros::Publisher regionPublisher_; + tf2_ros::Buffer tfBuffer_; + tf2_ros::TransformListener tfListener_; + + // Pipeline + std::unique_ptr planeDecompositionPipeline_; + + // Timing + Timer callbackTimer_; +}; + +} // namespace convex_plane_decomposition diff --git a/plane_segmentation/convex_plane_decomposition_ros/include/convex_plane_decomposition_ros/MessageConversion.h b/plane_segmentation/convex_plane_decomposition_ros/include/convex_plane_decomposition_ros/MessageConversion.h new file mode 100644 index 00000000..53006225 --- /dev/null +++ b/plane_segmentation/convex_plane_decomposition_ros/include/convex_plane_decomposition_ros/MessageConversion.h @@ -0,0 +1,42 @@ +// +// Created by rgrandia on 14.06.20. +// + +#pragma once + +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace convex_plane_decomposition { + +CgalBbox2d fromMessage(const convex_plane_decomposition_msgs::BoundingBox2d& msg); +convex_plane_decomposition_msgs::BoundingBox2d toMessage(const CgalBbox2d& bbox2d); + +PlanarRegion fromMessage(const convex_plane_decomposition_msgs::PlanarRegion& msg); +convex_plane_decomposition_msgs::PlanarRegion toMessage(const PlanarRegion& planarRegion); + +PlanarTerrain fromMessage(const convex_plane_decomposition_msgs::PlanarTerrain& msg); +convex_plane_decomposition_msgs::PlanarTerrain toMessage(const PlanarTerrain& planarTerrain); + +Eigen::Isometry3d fromMessage(const geometry_msgs::Pose& msg); +geometry_msgs::Pose toMessage(const Eigen::Isometry3d& transform); + +CgalPoint2d fromMessage(const convex_plane_decomposition_msgs::Point2d& msg); +convex_plane_decomposition_msgs::Point2d toMessage(const CgalPoint2d& point2d); + +CgalPolygon2d fromMessage(const convex_plane_decomposition_msgs::Polygon2d& msg); +convex_plane_decomposition_msgs::Polygon2d toMessage(const CgalPolygon2d& polygon2d); + +CgalPolygonWithHoles2d fromMessage(const convex_plane_decomposition_msgs::PolygonWithHoles2d& msg); +convex_plane_decomposition_msgs::PolygonWithHoles2d toMessage(const CgalPolygonWithHoles2d& polygonWithHoles2d); + +} // namespace convex_plane_decomposition diff --git a/plane_segmentation/convex_plane_decomposition_ros/include/convex_plane_decomposition_ros/ParameterLoading.h b/plane_segmentation/convex_plane_decomposition_ros/include/convex_plane_decomposition_ros/ParameterLoading.h new file mode 100644 index 00000000..704ad73a --- /dev/null +++ b/plane_segmentation/convex_plane_decomposition_ros/include/convex_plane_decomposition_ros/ParameterLoading.h @@ -0,0 +1,30 @@ +// +// Created by rgrandia on 10.06.20. 
+// + +#pragma once + +#include + +#include +#include +#include +#include +#include + +namespace convex_plane_decomposition { + +PreprocessingParameters loadPreprocessingParameters(const ros::NodeHandle& nodeHandle, const std::string& prefix); + +contour_extraction::ContourExtractionParameters loadContourExtractionParameters(const ros::NodeHandle& nodeHandle, + const std::string& prefix); + +ransac_plane_extractor::RansacPlaneExtractorParameters loadRansacPlaneExtractorParameters(const ros::NodeHandle& nodeHandle, + const std::string& prefix); + +sliding_window_plane_extractor::SlidingWindowPlaneExtractorParameters loadSlidingWindowPlaneExtractorParameters( + const ros::NodeHandle& nodeHandle, const std::string& prefix); + +PostprocessingParameters loadPostprocessingParameters(const ros::NodeHandle& nodeHandle, const std::string& prefix); + +} // namespace convex_plane_decomposition diff --git a/plane_segmentation/convex_plane_decomposition_ros/include/convex_plane_decomposition_ros/RosVisualizations.h b/plane_segmentation/convex_plane_decomposition_ros/include/convex_plane_decomposition_ros/RosVisualizations.h new file mode 100644 index 00000000..e3596562 --- /dev/null +++ b/plane_segmentation/convex_plane_decomposition_ros/include/convex_plane_decomposition_ros/RosVisualizations.h @@ -0,0 +1,23 @@ +#pragma once + +#include +#include +#include + +#include + +namespace convex_plane_decomposition { + +geometry_msgs::PolygonStamped to3dRosPolygon(const CgalPolygon2d& polygon, const Eigen::Isometry3d& transformPlaneToWorld, + const std_msgs::Header& header); + +std::vector to3dRosPolygon(const CgalPolygonWithHoles2d& polygonWithHoles, + const Eigen::Isometry3d& transformPlaneToWorld, const std_msgs::Header& header); + +visualization_msgs::MarkerArray convertBoundariesToRosMarkers(const std::vector& planarRegions, const std::string& frameId, + grid_map::Time time, double lineWidth); + +visualization_msgs::MarkerArray convertInsetsToRosMarkers(const std::vector& planarRegions, const std::string& frameId, + grid_map::Time time, double lineWidth); + +} // namespace convex_plane_decomposition diff --git a/plane_segmentation/convex_plane_decomposition_ros/launch/convex_plane_decomposition.launch b/plane_segmentation/convex_plane_decomposition_ros/launch/convex_plane_decomposition.launch new file mode 100644 index 00000000..6f7053b6 --- /dev/null +++ b/plane_segmentation/convex_plane_decomposition_ros/launch/convex_plane_decomposition.launch @@ -0,0 +1,14 @@ + + + + + + + + + + + + diff --git a/plane_segmentation/convex_plane_decomposition_ros/launch/demo.launch b/plane_segmentation/convex_plane_decomposition_ros/launch/demo.launch new file mode 100644 index 00000000..d592db55 --- /dev/null +++ b/plane_segmentation/convex_plane_decomposition_ros/launch/demo.launch @@ -0,0 +1,52 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/plane_segmentation/convex_plane_decomposition_ros/launch/save_elevation_map.launch b/plane_segmentation/convex_plane_decomposition_ros/launch/save_elevation_map.launch new file mode 100644 index 00000000..4197e474 --- /dev/null +++ b/plane_segmentation/convex_plane_decomposition_ros/launch/save_elevation_map.launch @@ -0,0 +1,11 @@ + + + + + + + + + + diff --git a/plane_segmentation/convex_plane_decomposition_ros/package.xml b/plane_segmentation/convex_plane_decomposition_ros/package.xml new file mode 100644 index 00000000..fac0c52b --- /dev/null +++ b/plane_segmentation/convex_plane_decomposition_ros/package.xml @@ 
-0,0 +1,26 @@ + + + convex_plane_decomposition_ros + 0.0.0 + The convex_plane_decomposition_ros package + + Ruben Grandia + + MIT + + catkin + + roscpp + grid_map_core + grid_map_ros + grid_map_cv + grid_map_msgs + geometry_msgs + convex_plane_decomposition + convex_plane_decomposition_msgs + tf2_ros + visualization_msgs + + grid_map_demos + + diff --git a/plane_segmentation/convex_plane_decomposition_ros/rviz/config_demo.rviz b/plane_segmentation/convex_plane_decomposition_ros/rviz/config_demo.rviz new file mode 100644 index 00000000..0d306212 --- /dev/null +++ b/plane_segmentation/convex_plane_decomposition_ros/rviz/config_demo.rviz @@ -0,0 +1,194 @@ +Panels: + - Class: rviz/Displays + Help Height: 0 + Name: Displays + Property Tree Widget: + Expanded: + - /Global Options1 + Splitter Ratio: 0.45271122455596924 + Tree Height: 1658 + - Class: rviz/Selection + Name: Selection + - Class: rviz/Tool Properties + Expanded: + - /2D Pose Estimate1 + - /2D Nav Goal1 + - /Publish Point1 + Name: Tool Properties + Splitter Ratio: 0.5886790156364441 + - Class: rviz/Views + Expanded: + - /Current View1 + Name: Views + Splitter Ratio: 0.5 + - Class: rviz/Time + Experimental: false + Name: Time + SyncMode: 0 + SyncSource: "" +Preferences: + PromptSaveOnExit: true +Toolbars: + toolButtonStyle: 2 +Visualization Manager: + Class: "" + Displays: + - Class: rviz/MarkerArray + Enabled: true + Marker Topic: /convex_plane_decomposition_ros/boundaries + Name: Boundaries + Namespaces: + "": true + Queue Size: 100 + Value: true + - Class: rviz/MarkerArray + Enabled: false + Marker Topic: /convex_plane_decomposition_ros/insets + Name: Insets + Namespaces: + { } + Queue Size: 100 + Value: false + - Alpha: 0.10000000149011612 + Autocompute Intensity Bounds: true + Class: grid_map_rviz_plugin/GridMap + Color: 200; 200; 200 + Color Layer: elevation + Color Transformer: GridMapLayer + Enabled: false + Grid Cell Decimation: 1 + Grid Line Thickness: 0.10000000149011612 + Height Layer: elevation + Height Transformer: GridMapLayer + History Length: 1 + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 10 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: GridMap + Show Grid Lines: true + Topic: /elevation_mapping/elevation_map_raw + Unreliable: false + Use Rainbow: true + Value: false + - Alpha: 0.6000000238418579 + Autocompute Intensity Bounds: true + Class: grid_map_rviz_plugin/GridMap + Color: 200; 200; 200 + Color Layer: plane_classification + Color Transformer: IntensityLayer + Enabled: false + Grid Cell Decimation: 1 + Grid Line Thickness: 0.10000000149011612 + Height Layer: elevation_before_postprocess + Height Transformer: "" + History Length: 1 + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 10 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: FilteredMap + Show Grid Lines: true + Topic: /convex_plane_decomposition_ros/filtered_map + Unreliable: false + Use Rainbow: false + Value: false + - Alpha: 1 + Class: rviz/PointStamped + Color: 204; 41; 204 + Enabled: true + History Length: 1 + Name: Query + Queue Size: 10 + Radius: 0.10000000149011612 + Topic: /convex_plane_decomposition_ros_approximation_demo_node/queryPosition + Unreliable: false + Value: true + - Alpha: 1 + Class: rviz/PointStamped + Color: 245; 121; 0 + Enabled: true + History Length: 1 + Name: Projection + Queue Size: 10 + Radius: 0.10000000149011612 + Topic: /convex_plane_decomposition_ros_approximation_demo_node/projectedQueryPosition + Unreliable: false + Value: true + - Alpha: 1 + Class: rviz/Polygon + Color: 245; 121; 
0 + Enabled: true + Name: ConvexApproximation + Queue Size: 10 + Topic: /convex_plane_decomposition_ros_approximation_demo_node/convex_terrain + Unreliable: false + Value: true + Enabled: true + Global Options: + Background Color: 255; 255; 255 + Default Light: true + Fixed Frame: odom + Frame Rate: 30 + Name: root + Tools: + - Class: rviz/Interact + Hide Inactive Objects: true + - Class: rviz/MoveCamera + - Class: rviz/Select + - Class: rviz/FocusCamera + - Class: rviz/Measure + - Class: rviz/SetInitialPose + Theta std deviation: 0.2617993950843811 + Topic: /initialpose + X std deviation: 0.5 + Y std deviation: 0.5 + - Class: rviz/SetGoal + Topic: /move_base_simple/goal + - Class: rviz/PublishPoint + Single click: true + Topic: /clicked_point + Value: true + Views: + Current: + Class: rviz/XYOrbit + Distance: 7.742486953735352 + Enable Stereo Rendering: + Stereo Eye Separation: 0.05999999865889549 + Stereo Focal Distance: 1 + Swap Stereo Eyes: false + Value: false + Field of View: 0.7853981852531433 + Focal Point: + X: -3.113455057144165 + Y: 1.4824542999267578 + Z: 3.337860107421875e-05 + Focal Shape Fixed Size: true + Focal Shape Size: 0.05000000074505806 + Invert Z Axis: false + Name: Current View + Near Clip Distance: 0.009999999776482582 + Pitch: 0.3057960569858551 + Target Frame: + Yaw: 2.410000801086426 + Saved: ~ +Window Geometry: + Displays: + collapsed: false + Height: 2032 + Hide Left Dock: false + Hide Right Dock: false + QMainWindow State: 000000ff00000000fd0000000400000000000002b8000006e8fc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b000000b000fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed0000033300000185000000b0fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000006e000006e80000018200fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261000000010000010f00000375fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003d000003750000013200fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e1000001970000000300000e7000000060fc0100000002fb0000000800540069006d0065010000000000000e700000057100fffffffb0000000800540069006d0065010000000000000450000000000000000000000bac000006e800000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 + Selection: + collapsed: false + Time: + collapsed: false + Tool Properties: + collapsed: false + Views: + collapsed: false + Width: 3696 + X: 144 + Y: 54 diff --git a/plane_segmentation/convex_plane_decomposition_ros/src/ConvexApproximationDemoNode.cpp b/plane_segmentation/convex_plane_decomposition_ros/src/ConvexApproximationDemoNode.cpp new file mode 100644 index 00000000..3447091f --- /dev/null +++ b/plane_segmentation/convex_plane_decomposition_ros/src/ConvexApproximationDemoNode.cpp @@ -0,0 +1,100 @@ +// +// Created by rgrandia on 24.06.20. 
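+// Demo node: samples random query points over the received terrain, projects each onto the best planar region, and grows a convex polygon around the projection for RViz visualization.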
+// + +#include + +#include + +#include + +#include +#include +#include + +#include + +#include +#include + +const std::string frameId = "odom"; +std::mutex terrainMutex; +std::unique_ptr planarTerrainPtr; + +void callback(const convex_plane_decomposition_msgs::PlanarTerrain::ConstPtr& msg) { + std::unique_ptr newTerrain( + new convex_plane_decomposition::PlanarTerrain(convex_plane_decomposition::fromMessage(*msg))); + + std::lock_guard lock(terrainMutex); + planarTerrainPtr.swap(newTerrain); +} + +geometry_msgs::PointStamped toMarker(const Eigen::Vector3d& position, const std_msgs::Header& header) { + geometry_msgs::PointStamped sphere; + sphere.header = header; + sphere.point.x = position.x(); + sphere.point.y = position.y(); + sphere.point.z = position.z(); + return sphere; +} + +float randomFloat(float a, float b) { + float random = ((float)rand()) / (float)RAND_MAX; + float diff = b - a; + float r = random * diff; + return a + r; +} + +int main(int argc, char** argv) { + ros::init(argc, argv, "convex_approximation_demo_node"); + ros::NodeHandle nodeHandle("~"); + + // Publishers for visualization + auto positionPublisher = nodeHandle.advertise("queryPosition", 1); + auto projectionPublisher = nodeHandle.advertise("projectedQueryPosition", 1); + auto convexTerrainPublisher = nodeHandle.advertise("convex_terrain", 1); + auto terrainSubscriber = nodeHandle.subscribe("/convex_plane_decomposition_ros/planar_terrain", 1, &callback); + + // Node loop + ros::Rate rate(ros::Duration(1.0)); + while (ros::ok()) { + { + std::lock_guard lock(terrainMutex); + if (planarTerrainPtr) { + const auto& map = planarTerrainPtr->gridMap; + + // Find edges. + double maxX = map.getPosition().x() + map.getLength().x() * 0.5; + double minX = map.getPosition().x() - map.getLength().x() * 0.5; + double maxY = map.getPosition().y() + map.getLength().y() * 0.5; + double minY = map.getPosition().y() - map.getLength().y() * 0.5; + + Eigen::Vector3d query{randomFloat(minX, maxX), randomFloat(minY, maxY), randomFloat(0.0, 1.0)}; + auto penaltyFunction = [](const Eigen::Vector3d& projectedPoint) { return 0.0; }; + + const auto projection = getBestPlanarRegionAtPositionInWorld(query, planarTerrainPtr->planarRegions, penaltyFunction); + + int numberOfVertices = 16; + double growthFactor = 1.05; + const auto convexRegion = convex_plane_decomposition::growConvexPolygonInsideShape( + projection.regionPtr->boundaryWithInset.boundary, projection.positionInTerrainFrame, numberOfVertices, growthFactor); + + std_msgs::Header header; + header.stamp.fromNSec(planarTerrainPtr->gridMap.getTimestamp()); + header.frame_id = frameId; + + auto convexRegionMsg = + convex_plane_decomposition::to3dRosPolygon(convexRegion, projection.regionPtr->transformPlaneToWorld, header); + + convexTerrainPublisher.publish(convexRegionMsg); + positionPublisher.publish(toMarker(query, header)); + projectionPublisher.publish(toMarker(projection.positionInWorld, header)); + } + } + + ros::spinOnce(); + rate.sleep(); + } + + return 0; +} \ No newline at end of file diff --git a/plane_segmentation/convex_plane_decomposition_ros/src/ConvexPlaneDecompositionNode.cpp b/plane_segmentation/convex_plane_decomposition_ros/src/ConvexPlaneDecompositionNode.cpp new file mode 100644 index 00000000..3d622df7 --- /dev/null +++ b/plane_segmentation/convex_plane_decomposition_ros/src/ConvexPlaneDecompositionNode.cpp @@ -0,0 +1,23 @@ +#include "convex_plane_decomposition_ros/ConvexPlaneDecompositionRos.h" + +#include + +int main(int argc, char** argv) { + ros::init(argc, 
argv, "convex_plane_decomposition_ros"); + ros::NodeHandle nodeHandle("~"); + + double frequency; + if (!nodeHandle.getParam("frequency", frequency)) { + ROS_ERROR("[ConvexPlaneDecompositionNode] Could not read parameter `frequency`."); + return 1; + } + + convex_plane_decomposition::ConvexPlaneExtractionROS convex_plane_decomposition_ros(nodeHandle); + + ros::Rate rate(frequency); + while (ros::ok()) { + ros::spinOnce(); + rate.sleep(); + } + return 0; +} diff --git a/plane_segmentation/convex_plane_decomposition_ros/src/ConvexPlaneDecompositionRos.cpp b/plane_segmentation/convex_plane_decomposition_ros/src/ConvexPlaneDecompositionRos.cpp new file mode 100644 index 00000000..04c6f2e1 --- /dev/null +++ b/plane_segmentation/convex_plane_decomposition_ros/src/ConvexPlaneDecompositionRos.cpp @@ -0,0 +1,187 @@ +#include "convex_plane_decomposition_ros/ConvexPlaneDecompositionRos.h" + +#include +#include +#include + +#include +#include + +#include "convex_plane_decomposition_ros/MessageConversion.h" +#include "convex_plane_decomposition_ros/ParameterLoading.h" +#include "convex_plane_decomposition_ros/RosVisualizations.h" + +namespace convex_plane_decomposition { + +ConvexPlaneExtractionROS::ConvexPlaneExtractionROS(ros::NodeHandle& nodeHandle) : tfBuffer_(), tfListener_(tfBuffer_) { + bool parametersLoaded = loadParameters(nodeHandle); + + if (parametersLoaded) { + elevationMapSubscriber_ = nodeHandle.subscribe(elevationMapTopic_, 1, &ConvexPlaneExtractionROS::callback, this); + filteredmapPublisher_ = nodeHandle.advertise("filtered_map", 1); + boundaryPublisher_ = nodeHandle.advertise("boundaries", 1); + insetPublisher_ = nodeHandle.advertise("insets", 1); + regionPublisher_ = nodeHandle.advertise("planar_terrain", 1); + } +} + +ConvexPlaneExtractionROS::~ConvexPlaneExtractionROS() { + if (callbackTimer_.getNumTimedIntervals() > 0 && planeDecompositionPipeline_ != nullptr) { + std::stringstream infoStream; + infoStream << "\n########################################################################\n"; + infoStream << "The benchmarking is computed over " << callbackTimer_.getNumTimedIntervals() << " iterations. 
\n"; + infoStream << "PlaneExtraction Benchmarking : Average time [ms], Max time [ms]\n"; + auto printLine = [](std::string name, const Timer& timer) { + std::stringstream ss; + ss << std::fixed << std::setprecision(2); + ss << "\t" << name << "\t: " << std::setw(17) << timer.getAverageInMilliseconds() << ", " << std::setw(13) + << timer.getMaxIntervalInMilliseconds() << "\n"; + return ss.str(); + }; + infoStream << printLine("Pre-process ", planeDecompositionPipeline_->getPrepocessTimer()); + infoStream << printLine("Sliding window ", planeDecompositionPipeline_->getSlidingWindowTimer()); + infoStream << printLine("Contour extraction ", planeDecompositionPipeline_->getContourExtractionTimer()); + infoStream << printLine("Post-process ", planeDecompositionPipeline_->getPostprocessTimer()); + infoStream << printLine("Total callback ", callbackTimer_); + std::cerr << infoStream.str() << std::endl; + } +} + +bool ConvexPlaneExtractionROS::loadParameters(const ros::NodeHandle& nodeHandle) { + if (!nodeHandle.getParam("elevation_topic", elevationMapTopic_)) { + ROS_ERROR("[ConvexPlaneExtractionROS] Could not read parameter `elevation_topic`."); + return false; + } + if (!nodeHandle.getParam("target_frame_id", targetFrameId_)) { + ROS_ERROR("[ConvexPlaneExtractionROS] Could not read parameter `target_frame_id`."); + return false; + } + if (!nodeHandle.getParam("height_layer", elevationLayer_)) { + ROS_ERROR("[ConvexPlaneExtractionROS] Could not read parameter `height_layer`."); + return false; + } + if (!nodeHandle.getParam("submap/width", subMapWidth_)) { + ROS_ERROR("[ConvexPlaneExtractionROS] Could not read parameter `submap/width`."); + return false; + } + if (!nodeHandle.getParam("submap/length", subMapLength_)) { + ROS_ERROR("[ConvexPlaneExtractionROS] Could not read parameter `submap/length`."); + return false; + } + if (!nodeHandle.getParam("publish_to_controller", publishToController_)) { + ROS_ERROR("[ConvexPlaneExtractionROS] Could not read parameter `publish_to_controller`."); + return false; + } + + PlaneDecompositionPipeline::Config config; + config.preprocessingParameters = loadPreprocessingParameters(nodeHandle, "preprocessing/"); + config.contourExtractionParameters = loadContourExtractionParameters(nodeHandle, "contour_extraction/"); + config.ransacPlaneExtractorParameters = loadRansacPlaneExtractorParameters(nodeHandle, "ransac_plane_refinement/"); + config.slidingWindowPlaneExtractorParameters = loadSlidingWindowPlaneExtractorParameters(nodeHandle, "sliding_window_plane_extractor/"); + config.postprocessingParameters = loadPostprocessingParameters(nodeHandle, "postprocessing/"); + + planeDecompositionPipeline_ = std::make_unique(config); + + return true; +} + +void ConvexPlaneExtractionROS::callback(const grid_map_msgs::GridMap& message) { + callbackTimer_.startTimer(); + + // Convert message to map. + grid_map::GridMap messageMap; + std::vector layers{elevationLayer_}; + grid_map::GridMapRosConverter::fromMessage(message, messageMap, layers, false, false); + if (!containsFiniteValue(messageMap.get(elevationLayer_))) { + ROS_WARN("[ConvexPlaneExtractionROS] map does not contain any values"); + callbackTimer_.endTimer(); + return; + } + + // Transform map if necessary + if (targetFrameId_ != messageMap.getFrameId()) { + std::string errorMsg; + ros::Time timeStamp = ros::Time(0); // Use Time(0) to get the latest transform. 
+ if (tfBuffer_.canTransform(targetFrameId_, messageMap.getFrameId(), timeStamp, &errorMsg)) { + const auto transform = getTransformToTargetFrame(messageMap.getFrameId(), timeStamp); + messageMap = grid_map::GridMapCvProcessing::getTransformedMap(std::move(messageMap), transform, elevationLayer_, targetFrameId_); + } else { + ROS_ERROR_STREAM("[ConvexPlaneExtractionROS] " << errorMsg); + callbackTimer_.endTimer(); + return; + } + } + + // Extract submap + bool success; + const grid_map::Position submapPosition = [&]() { + // The map center might be between cells. Taking the submap there can result in changing submap dimensions. + // project map center to an index and index to center s.t. we get the location of a cell. + grid_map::Index centerIndex; + grid_map::Position centerPosition; + messageMap.getIndex(messageMap.getPosition(), centerIndex); + messageMap.getPosition(centerIndex, centerPosition); + return centerPosition; + }(); + grid_map::GridMap elevationMap = messageMap.getSubmap(submapPosition, Eigen::Array2d(subMapLength_, subMapWidth_), success); + if (!success) { + ROS_WARN("[ConvexPlaneExtractionROS] Could not extract submap"); + callbackTimer_.endTimer(); + return; + } + const grid_map::Matrix elevationRaw = elevationMap.get(elevationLayer_); + + // Run pipeline. + planeDecompositionPipeline_->update(std::move(elevationMap), elevationLayer_); + auto& planarTerrain = planeDecompositionPipeline_->getPlanarTerrain(); + + // Publish terrain + if (publishToController_) { + regionPublisher_.publish(toMessage(planarTerrain)); + } + + // --- Visualize in Rviz --- Not published to the controller + // Add raw map + planarTerrain.gridMap.add("elevation_raw", elevationRaw); + + // Add segmentation + planarTerrain.gridMap.add("segmentation"); + planeDecompositionPipeline_->getSegmentation(planarTerrain.gridMap.get("segmentation")); + + grid_map_msgs::GridMap outputMessage; + grid_map::GridMapRosConverter::toMessage(planarTerrain.gridMap, outputMessage); + filteredmapPublisher_.publish(outputMessage); + + const double lineWidth = 0.005; // [m] RViz marker size + boundaryPublisher_.publish(convertBoundariesToRosMarkers(planarTerrain.planarRegions, planarTerrain.gridMap.getFrameId(), + planarTerrain.gridMap.getTimestamp(), lineWidth)); + insetPublisher_.publish(convertInsetsToRosMarkers(planarTerrain.planarRegions, planarTerrain.gridMap.getFrameId(), + planarTerrain.gridMap.getTimestamp(), lineWidth)); + + callbackTimer_.endTimer(); +} + +Eigen::Isometry3d ConvexPlaneExtractionROS::getTransformToTargetFrame(const std::string& sourceFrame, const ros::Time& time) { + geometry_msgs::TransformStamped transformStamped; + try { + transformStamped = tfBuffer_.lookupTransform(targetFrameId_, sourceFrame, time); + } catch (tf2::TransformException& ex) { + ROS_ERROR("[ConvexPlaneExtractionROS] %s", ex.what()); + return Eigen::Isometry3d::Identity(); + } + + Eigen::Isometry3d transformation; + + // Extract translation. + transformation.translation().x() = transformStamped.transform.translation.x; + transformation.translation().y() = transformStamped.transform.translation.y; + transformation.translation().z() = transformStamped.transform.translation.z; + + // Extract rotation. 
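+ // Note the argument order: Eigen::Quaterniond's constructor expects (w, x, y, z), whereas geometry_msgs::Quaternion stores x, y, z, and w as separate fields.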
+ Eigen::Quaterniond rotationQuaternion(transformStamped.transform.rotation.w, transformStamped.transform.rotation.x, + transformStamped.transform.rotation.y, transformStamped.transform.rotation.z); + transformation.linear() = rotationQuaternion.toRotationMatrix(); + return transformation; +} + +} // namespace convex_plane_decomposition diff --git a/plane_segmentation/convex_plane_decomposition_ros/src/MessageConversion.cpp b/plane_segmentation/convex_plane_decomposition_ros/src/MessageConversion.cpp new file mode 100644 index 00000000..5194bfe0 --- /dev/null +++ b/plane_segmentation/convex_plane_decomposition_ros/src/MessageConversion.cpp @@ -0,0 +1,143 @@ +// +// Created by rgrandia on 14.06.20. +// + +#include "convex_plane_decomposition_ros/MessageConversion.h" + +#include + +namespace convex_plane_decomposition { + +CgalBbox2d fromMessage(const convex_plane_decomposition_msgs::BoundingBox2d& msg) { + return {msg.min_x, msg.min_y, msg.max_x, msg.max_y}; +} + +convex_plane_decomposition_msgs::BoundingBox2d toMessage(const CgalBbox2d& bbox2d) { + convex_plane_decomposition_msgs::BoundingBox2d msg; + msg.min_x = bbox2d.xmin(); + msg.min_y = bbox2d.ymin(); + msg.max_x = bbox2d.xmax(); + msg.max_y = bbox2d.ymax(); + return msg; +} + +PlanarRegion fromMessage(const convex_plane_decomposition_msgs::PlanarRegion& msg) { + PlanarRegion planarRegion; + planarRegion.transformPlaneToWorld = fromMessage(msg.plane_parameters); + planarRegion.boundaryWithInset.boundary = fromMessage(msg.boundary); + planarRegion.boundaryWithInset.insets.reserve(msg.insets.size()); + for (const auto& inset : msg.insets) { + planarRegion.boundaryWithInset.insets.emplace_back(fromMessage(inset)); + } + planarRegion.bbox2d = fromMessage(msg.bbox2d); + return planarRegion; +} + +convex_plane_decomposition_msgs::PlanarRegion toMessage(const PlanarRegion& planarRegion) { + convex_plane_decomposition_msgs::PlanarRegion msg; + msg.plane_parameters = toMessage(planarRegion.transformPlaneToWorld); + msg.boundary = toMessage(planarRegion.boundaryWithInset.boundary); + msg.insets.reserve(planarRegion.boundaryWithInset.insets.size()); + for (const auto& inset : planarRegion.boundaryWithInset.insets) { + msg.insets.emplace_back(toMessage(inset)); + } + msg.bbox2d = toMessage(planarRegion.bbox2d); + return msg; +} + +PlanarTerrain fromMessage(const convex_plane_decomposition_msgs::PlanarTerrain& msg) { + PlanarTerrain planarTerrain; + planarTerrain.planarRegions.reserve(msg.planarRegions.size()); + for (const auto& planarRegion : msg.planarRegions) { + planarTerrain.planarRegions.emplace_back(fromMessage(planarRegion)); + } + grid_map::GridMapRosConverter::fromMessage(msg.gridmap, planarTerrain.gridMap); + return planarTerrain; +} + +convex_plane_decomposition_msgs::PlanarTerrain toMessage(const PlanarTerrain& planarTerrain) { + convex_plane_decomposition_msgs::PlanarTerrain msg; + msg.planarRegions.reserve(planarTerrain.planarRegions.size()); + for (const auto& planarRegion : planarTerrain.planarRegions) { + msg.planarRegions.emplace_back(toMessage(planarRegion)); + } + grid_map::GridMapRosConverter::toMessage(planarTerrain.gridMap, msg.gridmap); + return msg; +} + +Eigen::Isometry3d fromMessage(const geometry_msgs::Pose& msg) { + Eigen::Isometry3d transform; + transform.translation().x() = msg.position.x; + transform.translation().y() = msg.position.y; + transform.translation().z() = msg.position.z; + Eigen::Quaterniond orientation; + orientation.x() = msg.orientation.x; + orientation.y() = msg.orientation.y; + orientation.z() = 
msg.orientation.z; + orientation.w() = msg.orientation.w; + transform.linear() = orientation.toRotationMatrix(); + return transform; +} + +geometry_msgs::Pose toMessage(const Eigen::Isometry3d& transform) { + geometry_msgs::Pose pose; + pose.position.x = transform.translation().x(); + pose.position.y = transform.translation().y(); + pose.position.z = transform.translation().z(); + Eigen::Quaterniond terrainOrientation(transform.linear()); + pose.orientation.x = terrainOrientation.x(); + pose.orientation.y = terrainOrientation.y(); + pose.orientation.z = terrainOrientation.z(); + pose.orientation.w = terrainOrientation.w(); + return pose; +} + +CgalPoint2d fromMessage(const convex_plane_decomposition_msgs::Point2d& msg) { + return {msg.x, msg.y}; +} + +convex_plane_decomposition_msgs::Point2d toMessage(const CgalPoint2d& point2d) { + convex_plane_decomposition_msgs::Point2d msg; + msg.x = point2d.x(); + msg.y = point2d.y(); + return msg; +} + +CgalPolygon2d fromMessage(const convex_plane_decomposition_msgs::Polygon2d& msg) { + CgalPolygon2d polygon2d; + polygon2d.container().reserve(msg.points.size()); + for (const auto& point : msg.points) { + polygon2d.container().emplace_back(fromMessage(point)); + } + return polygon2d; +} + +convex_plane_decomposition_msgs::Polygon2d toMessage(const CgalPolygon2d& polygon2d) { + convex_plane_decomposition_msgs::Polygon2d msg; + msg.points.reserve(polygon2d.container().size()); + for (const auto& point : polygon2d) { + msg.points.emplace_back(toMessage(point)); + } + return msg; +} + +CgalPolygonWithHoles2d fromMessage(const convex_plane_decomposition_msgs::PolygonWithHoles2d& msg) { + CgalPolygonWithHoles2d polygonWithHoles2d; + polygonWithHoles2d.outer_boundary() = fromMessage(msg.outer_boundary); + for (const auto& hole : msg.holes) { + polygonWithHoles2d.add_hole(fromMessage(hole)); + } + return polygonWithHoles2d; +} + +convex_plane_decomposition_msgs::PolygonWithHoles2d toMessage(const CgalPolygonWithHoles2d& polygonWithHoles2d) { + convex_plane_decomposition_msgs::PolygonWithHoles2d msg; + msg.outer_boundary = toMessage(polygonWithHoles2d.outer_boundary()); + msg.holes.reserve(polygonWithHoles2d.number_of_holes()); + for (const auto& hole : polygonWithHoles2d.holes()) { + msg.holes.emplace_back(toMessage(hole)); + } + return msg; +} + +} // namespace convex_plane_decomposition diff --git a/plane_segmentation/convex_plane_decomposition_ros/src/ParameterLoading.cpp b/plane_segmentation/convex_plane_decomposition_ros/src/ParameterLoading.cpp new file mode 100644 index 00000000..3fac5d4c --- /dev/null +++ b/plane_segmentation/convex_plane_decomposition_ros/src/ParameterLoading.cpp @@ -0,0 +1,78 @@ +// +// Created by rgrandia on 10.06.20. +// + +#include "convex_plane_decomposition_ros/ParameterLoading.h" + +namespace convex_plane_decomposition { + +template +bool loadParameter(const ros::NodeHandle& nodeHandle, const std::string& prefix, const std::string& param, T& value) { + if (!nodeHandle.getParam(prefix + param, value)) { + ROS_ERROR_STREAM("[ConvexPlaneExtractionROS] Could not read parameter `" + << param << "`. 
Setting parameter to default value : " << std::to_string(value)); + return false; + } else { + return true; + } +} + +PreprocessingParameters loadPreprocessingParameters(const ros::NodeHandle& nodeHandle, const std::string& prefix) { + PreprocessingParameters preprocessingParameters; + loadParameter(nodeHandle, prefix, "resolution", preprocessingParameters.resolution); + loadParameter(nodeHandle, prefix, "kernelSize", preprocessingParameters.kernelSize); + loadParameter(nodeHandle, prefix, "numberOfRepeats", preprocessingParameters.numberOfRepeats); + return preprocessingParameters; +} + +contour_extraction::ContourExtractionParameters loadContourExtractionParameters(const ros::NodeHandle& nodeHandle, + const std::string& prefix) { + contour_extraction::ContourExtractionParameters contourParams; + loadParameter(nodeHandle, prefix, "marginSize", contourParams.marginSize); + return contourParams; +} + +ransac_plane_extractor::RansacPlaneExtractorParameters loadRansacPlaneExtractorParameters(const ros::NodeHandle& nodeHandle, + const std::string& prefix) { + ransac_plane_extractor::RansacPlaneExtractorParameters ransacParams; + loadParameter(nodeHandle, prefix, "probability", ransacParams.probability); + loadParameter(nodeHandle, prefix, "min_points", ransacParams.min_points); + loadParameter(nodeHandle, prefix, "epsilon", ransacParams.epsilon); + loadParameter(nodeHandle, prefix, "cluster_epsilon", ransacParams.cluster_epsilon); + loadParameter(nodeHandle, prefix, "normal_threshold", ransacParams.normal_threshold); + return ransacParams; +} + +sliding_window_plane_extractor::SlidingWindowPlaneExtractorParameters loadSlidingWindowPlaneExtractorParameters( + const ros::NodeHandle& nodeHandle, const std::string& prefix) { + sliding_window_plane_extractor::SlidingWindowPlaneExtractorParameters swParams; + loadParameter(nodeHandle, prefix, "kernel_size", swParams.kernel_size); + loadParameter(nodeHandle, prefix, "planarity_opening_filter", swParams.planarity_opening_filter); + if (loadParameter(nodeHandle, prefix, "plane_inclination_threshold_degrees", swParams.plane_inclination_threshold)) { + swParams.plane_inclination_threshold = std::cos(swParams.plane_inclination_threshold * M_PI / 180.0); + } + if (loadParameter(nodeHandle, prefix, "local_plane_inclination_threshold_degrees", swParams.local_plane_inclination_threshold)) { + swParams.local_plane_inclination_threshold = std::cos(swParams.local_plane_inclination_threshold * M_PI / 180.0); + } + loadParameter(nodeHandle, prefix, "plane_patch_error_threshold", swParams.plane_patch_error_threshold); + loadParameter(nodeHandle, prefix, "min_number_points_per_label", swParams.min_number_points_per_label); + loadParameter(nodeHandle, prefix, "connectivity", swParams.connectivity); + loadParameter(nodeHandle, prefix, "include_ransac_refinement", swParams.include_ransac_refinement); + loadParameter(nodeHandle, prefix, "global_plane_fit_distance_error_threshold", swParams.global_plane_fit_distance_error_threshold); + loadParameter(nodeHandle, prefix, "global_plane_fit_angle_error_threshold_degrees", + swParams.global_plane_fit_angle_error_threshold_degrees); + return swParams; +} + +PostprocessingParameters loadPostprocessingParameters(const ros::NodeHandle& nodeHandle, const std::string& prefix) { + PostprocessingParameters postprocessingParameters; + loadParameter(nodeHandle, prefix, "extracted_planes_height_offset", postprocessingParameters.extracted_planes_height_offset); + loadParameter(nodeHandle, prefix, "nonplanar_height_offset", 
postprocessingParameters.nonplanar_height_offset); + loadParameter(nodeHandle, prefix, "nonplanar_horizontal_offset", postprocessingParameters.nonplanar_horizontal_offset); + loadParameter(nodeHandle, prefix, "smoothing_dilation_size", postprocessingParameters.smoothing_dilation_size); + loadParameter(nodeHandle, prefix, "smoothing_box_kernel_size", postprocessingParameters.smoothing_box_kernel_size); + loadParameter(nodeHandle, prefix, "smoothing_gauss_kernel_size", postprocessingParameters.smoothing_gauss_kernel_size); + return postprocessingParameters; +} + +} // namespace convex_plane_decomposition diff --git a/plane_segmentation/convex_plane_decomposition_ros/src/RosVisualizations.cpp b/plane_segmentation/convex_plane_decomposition_ros/src/RosVisualizations.cpp new file mode 100644 index 00000000..8cd1b72e --- /dev/null +++ b/plane_segmentation/convex_plane_decomposition_ros/src/RosVisualizations.cpp @@ -0,0 +1,159 @@ +#include "convex_plane_decomposition_ros/RosVisualizations.h" + +#include + +namespace convex_plane_decomposition { + +geometry_msgs::PolygonStamped to3dRosPolygon(const CgalPolygon2d& polygon, const Eigen::Isometry3d& transformPlaneToWorld, + const std_msgs::Header& header) { + geometry_msgs::PolygonStamped polygon3d; + polygon3d.header = header; + polygon3d.polygon.points.reserve(polygon.size()); + for (const auto& point : polygon) { + geometry_msgs::Point32 point_ros; + const auto pointInWorld = positionInWorldFrameFromPosition2dInPlane(point, transformPlaneToWorld); + point_ros.x = static_cast(pointInWorld.x()); + point_ros.y = static_cast(pointInWorld.y()); + point_ros.z = static_cast(pointInWorld.z()); + polygon3d.polygon.points.push_back(point_ros); + } + return polygon3d; +} + +std::vector to3dRosPolygon(const CgalPolygonWithHoles2d& polygonWithHoles, + const Eigen::Isometry3d& transformPlaneToWorld, const std_msgs::Header& header) { + std::vector polygons; + + polygons.reserve(polygonWithHoles.number_of_holes() + 1); + polygons.emplace_back(to3dRosPolygon(polygonWithHoles.outer_boundary(), transformPlaneToWorld, header)); + + for (const auto& hole : polygonWithHoles.holes()) { + polygons.emplace_back(to3dRosPolygon(hole, transformPlaneToWorld, header)); + } + return polygons; +} + +namespace { // Helper functions for convertBoundariesToRosMarkers and convertInsetsToRosMarkers +std_msgs::ColorRGBA getColor(int id, float alpha = 1.0) { + constexpr int numColors = 7; + using RGB = std::array; + // clang-format off + static const std::array, numColors> colorMap{ + RGB{0.0000F, 0.4470F, 0.7410F}, + RGB{0.8500F, 0.3250F, 0.0980F}, + RGB{0.9290F, 0.6940F, 0.1250F}, + RGB{0.4940F, 0.1840F, 0.5560F}, + RGB{0.4660F, 0.6740F, 0.1880F}, + RGB{0.6350F, 0.0780F, 0.1840F}, + RGB{0.2500F, 0.2500F, 0.2500F} + }; + // clang-format on + + std_msgs::ColorRGBA colorMsg; + const auto& rgb = colorMap[id % numColors]; + colorMsg.r = rgb[0]; + colorMsg.g = rgb[1]; + colorMsg.b = rgb[2]; + colorMsg.a = alpha; + return colorMsg; +} + +visualization_msgs::Marker to3dRosMarker(const CgalPolygon2d& polygon, const Eigen::Isometry3d& transformPlaneToWorld, + const std_msgs::Header& header, const std_msgs::ColorRGBA& color, int id, double lineWidth) { + visualization_msgs::Marker line; + line.id = id; + line.header = header; + line.type = visualization_msgs::Marker::LINE_STRIP; + line.scale.x = lineWidth; + line.color = color; + if (!polygon.is_empty()) { + line.points.reserve(polygon.size() + 1); + for (const auto& point : polygon) { + const auto pointInWorld = 
positionInWorldFrameFromPosition2dInPlane(point, transformPlaneToWorld); + geometry_msgs::Point point_ros; + point_ros.x = pointInWorld.x(); + point_ros.y = pointInWorld.y(); + point_ros.z = pointInWorld.z(); + line.points.push_back(point_ros); + } + // Repeat the first point to close the polygon. + const auto pointInWorld = positionInWorldFrameFromPosition2dInPlane(polygon.vertex(0), transformPlaneToWorld); + geometry_msgs::Point point_ros; + point_ros.x = pointInWorld.x(); + point_ros.y = pointInWorld.y(); + point_ros.z = pointInWorld.z(); + line.points.push_back(point_ros); + } + line.pose.orientation.w = 1.0; + line.pose.orientation.x = 0.0; + line.pose.orientation.y = 0.0; + line.pose.orientation.z = 0.0; + return line; +} + +visualization_msgs::MarkerArray to3dRosMarker(const CgalPolygonWithHoles2d& polygonWithHoles, + const Eigen::Isometry3d& transformPlaneToWorld, const std_msgs::Header& header, + const std_msgs::ColorRGBA& color, int id, double lineWidth) { + visualization_msgs::MarkerArray polygons; + + polygons.markers.reserve(polygonWithHoles.number_of_holes() + 1); + polygons.markers.emplace_back(to3dRosMarker(polygonWithHoles.outer_boundary(), transformPlaneToWorld, header, color, id, lineWidth)); + ++id; + + for (const auto& hole : polygonWithHoles.holes()) { + polygons.markers.emplace_back(to3dRosMarker(hole, transformPlaneToWorld, header, color, id, lineWidth)); + ++id; + } + return polygons; +} +} // namespace + +visualization_msgs::MarkerArray convertBoundariesToRosMarkers(const std::vector& planarRegions, const std::string& frameId, + grid_map::Time time, double lineWidth) { + std_msgs::Header header; + header.stamp.fromNSec(time); + header.frame_id = frameId; + + visualization_msgs::Marker deleteMarker; + deleteMarker.action = visualization_msgs::Marker::DELETEALL; + + visualization_msgs::MarkerArray polygon_buffer; + polygon_buffer.markers.reserve(planarRegions.size() + 1); // lower bound + polygon_buffer.markers.push_back(deleteMarker); + int colorIdx = 0; + for (const auto& planarRegion : planarRegions) { + const auto color = getColor(colorIdx++); + int label = polygon_buffer.markers.size(); + auto boundaries = + to3dRosMarker(planarRegion.boundaryWithInset.boundary, planarRegion.transformPlaneToWorld, header, color, label, lineWidth); + std::move(boundaries.markers.begin(), boundaries.markers.end(), std::back_inserter(polygon_buffer.markers)); + } + + return polygon_buffer; +} + +visualization_msgs::MarkerArray convertInsetsToRosMarkers(const std::vector& planarRegions, const std::string& frameId, + grid_map::Time time, double lineWidth) { + std_msgs::Header header; + header.stamp.fromNSec(time); + header.frame_id = frameId; + + visualization_msgs::Marker deleteMarker; + deleteMarker.action = visualization_msgs::Marker::DELETEALL; + + visualization_msgs::MarkerArray polygon_buffer; + polygon_buffer.markers.reserve(planarRegions.size() + 1); // lower bound + polygon_buffer.markers.push_back(deleteMarker); + int colorIdx = 0; + for (const auto& planarRegion : planarRegions) { + const auto color = getColor(colorIdx++); + for (const auto& inset : planarRegion.boundaryWithInset.insets) { + int label = polygon_buffer.markers.size(); + auto insets = to3dRosMarker(inset, planarRegion.transformPlaneToWorld, header, color, label, lineWidth); + std::move(insets.markers.begin(), insets.markers.end(), std::back_inserter(polygon_buffer.markers)); + } + } + return polygon_buffer; +} + +} // namespace convex_plane_decomposition diff --git
a/plane_segmentation/convex_plane_decomposition_ros/src/SaveElevationMapAsImageNode.cpp b/plane_segmentation/convex_plane_decomposition_ros/src/SaveElevationMapAsImageNode.cpp new file mode 100644 index 00000000..8f471b22 --- /dev/null +++ b/plane_segmentation/convex_plane_decomposition_ros/src/SaveElevationMapAsImageNode.cpp @@ -0,0 +1,73 @@ +// +// Created by rgrandia on 11.06.20. +// + +#include + +#include +#include +#include + +#include + +int count = 0; +double frequency; +std::string elevationMapTopic; +std::string elevationLayer; +std::string imageName; + +void callback(const grid_map_msgs::GridMap::ConstPtr& message) { + grid_map::GridMap messageMap; + grid_map::GridMapRosConverter::fromMessage(*message, messageMap); + + const auto& data = messageMap[elevationLayer]; + float maxHeight = std::numeric_limits::lowest(); + float minHeight = std::numeric_limits::max(); + for (int i = 0; i < data.rows(); i++) { + for (int j = 0; j < data.cols(); j++) { + const auto value = data(i, j); + if (!std::isnan(value)) { + maxHeight = std::max(maxHeight, value); + minHeight = std::min(minHeight, value); + } + } + } + + cv::Mat image; + grid_map::GridMapCvConverter::toImage(messageMap, elevationLayer, CV_8UC1, minHeight, maxHeight, image); + + int range = 100 * (maxHeight - minHeight); + cv::imwrite(imageName + "_" + std::to_string(count++) + "_" + std::to_string(range) + "cm.png", image); +} + +int main(int argc, char** argv) { + ros::init(argc, argv, "save_elevation_map_to_image"); + ros::NodeHandle nodeHandle("~"); + + if (!nodeHandle.getParam("frequency", frequency)) { + ROS_ERROR("[ConvexPlaneExtractionROS] Could not read parameter `frequency`."); + return 1; + } + if (!nodeHandle.getParam("elevation_topic", elevationMapTopic)) { + ROS_ERROR("[ConvexPlaneExtractionROS] Could not read parameter `elevation_topic`."); + return 1; + } + if (!nodeHandle.getParam("height_layer", elevationLayer)) { + ROS_ERROR("[ConvexPlaneExtractionROS] Could not read parameter `height_layer`."); + return 1; + } + if (!nodeHandle.getParam("imageName", imageName)) { + ROS_ERROR("[ConvexPlaneExtractionROS] Could not read parameter `imageName`."); + return 1; + } + + auto elevationMapSubscriber_ = nodeHandle.subscribe(elevationMapTopic, 1, callback); + + ros::Rate rate(frequency); + while (ros::ok()) { + ros::spinOnce(); + rate.sleep(); + } + + return 0; +} \ No newline at end of file diff --git a/plane_segmentation/convex_plane_decomposition_ros/src/noiseNode.cpp b/plane_segmentation/convex_plane_decomposition_ros/src/noiseNode.cpp new file mode 100644 index 00000000..c65f03de --- /dev/null +++ b/plane_segmentation/convex_plane_decomposition_ros/src/noiseNode.cpp @@ -0,0 +1,107 @@ +// +// Created by rgrandia on 25.10.21. 
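+// Test utility: corrupts an incoming elevation map with uniform and Gaussian noise (and optional blurring) before republishing it, e.g. to stress-test the plane decomposition pipeline.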
+// + +#include + +#include +#include + +#include +#include + +double noiseUniform; +double noiseGauss; +double outlierPercentage; +bool blur; +double frequency; +std::string elevationMapTopicIn; +std::string elevationMapTopicOut; +std::string elevationLayer; +ros::Publisher publisher; +grid_map::GridMap::Matrix noiseLayer; + +void createNoise(size_t row, size_t col) { + // Box-Muller transform: maps two independent U(0,1) samples to one N(0,1) sample. + grid_map::GridMap::Matrix u1 = 0.5 * grid_map::GridMap::Matrix::Random(row, col).array() + 0.5; + grid_map::GridMap::Matrix u2 = 0.5 * grid_map::GridMap::Matrix::Random(row, col).array() + 0.5; + grid_map::GridMap::Matrix gauss01 = + u1.binaryExpr(u2, [&](float v1, float v2) { return std::sqrt(-2.0f * log(v1)) * cos(2.0f * M_PIf32 * v2); }); + + noiseLayer = noiseUniform * grid_map::GridMap::Matrix::Random(row, col) + noiseGauss * gauss01; +} + +void callback(const grid_map_msgs::GridMap::ConstPtr& message) { + grid_map::GridMap messageMap; + grid_map::GridMapRosConverter::fromMessage(*message, messageMap); + + if (blur) { + // Copy! + auto originalMap = messageMap.get(elevationLayer); + + // Blur (remove nan -> filter -> put back nan). + grid_map::inpainting::minValues(messageMap, elevationLayer, "i"); + grid_map::smoothing::boxBlur(messageMap, "i", elevationLayer, 3, 1); + messageMap.get(elevationLayer) = (originalMap.array().isFinite()).select(messageMap.get(elevationLayer), originalMap); + } + + auto& elevation = messageMap.get(elevationLayer); + if (noiseLayer.size() != elevation.size()) { + createNoise(elevation.rows(), elevation.cols()); + } + + elevation += noiseLayer; + + grid_map_msgs::GridMap messageMapOut; + grid_map::GridMapRosConverter::toMessage(messageMap, messageMapOut); + publisher.publish(messageMapOut); +} + +int main(int argc, char** argv) { + ros::init(argc, argv, "noise_node"); + ros::NodeHandle nodeHandle("~"); + + if (!nodeHandle.getParam("frequency", frequency)) { + ROS_ERROR("[ConvexPlaneExtractionROS::NoiseNode] Could not read parameter `frequency`."); + return 1; + } + if (!nodeHandle.getParam("noiseGauss", noiseGauss)) { + ROS_ERROR("[ConvexPlaneExtractionROS::NoiseNode] Could not read parameter `noiseGauss`."); + return 1; + } + if (!nodeHandle.getParam("noiseUniform", noiseUniform)) { + ROS_ERROR("[ConvexPlaneExtractionROS::NoiseNode] Could not read parameter `noiseUniform`."); + return 1; + } + if (!nodeHandle.getParam("blur", blur)) { + ROS_ERROR("[ConvexPlaneExtractionROS::NoiseNode] Could not read parameter `blur`."); + return 1; + } + if (!nodeHandle.getParam("outlier_percentage", outlierPercentage)) { + ROS_ERROR("[ConvexPlaneExtractionROS::NoiseNode] Could not read parameter `outlier_percentage`."); + return 1; + } + if (!nodeHandle.getParam("elevation_topic_in", elevationMapTopicIn)) { + ROS_ERROR("[ConvexPlaneExtractionROS::NoiseNode] Could not read parameter `elevation_topic_in`."); + return 1; + } + if (!nodeHandle.getParam("elevation_topic_out", elevationMapTopicOut)) { + ROS_ERROR("[ConvexPlaneExtractionROS::NoiseNode] Could not read parameter `elevation_topic_out`."); + return 1; + } + if (!nodeHandle.getParam("height_layer", elevationLayer)) { + ROS_ERROR("[ConvexPlaneExtractionROS::NoiseNode] Could not read parameter `height_layer`."); + return 1; + } + + publisher = nodeHandle.advertise(elevationMapTopicOut, 1); + auto elevationMapSubscriber_ = nodeHandle.subscribe(elevationMapTopicIn, 1, callback); + + ros::Rate rate(frequency); + while (ros::ok()) { + ros::spinOnce(); + rate.sleep(); + } + + return 0; +} \ No newline at end of file diff --git
a/plane_segmentation/convex_plane_decomposition_ros/test/TestShapeGrowing.cpp b/plane_segmentation/convex_plane_decomposition_ros/test/TestShapeGrowing.cpp new file mode 100644 index 00000000..0dcc4c92 --- /dev/null +++ b/plane_segmentation/convex_plane_decomposition_ros/test/TestShapeGrowing.cpp @@ -0,0 +1,46 @@ +// +// Created by rgrandia on 08.06.20. +// + +#include +#include +#include + +using namespace convex_plane_decomposition; + +int main(int argc, char** argv) { + CgalPolygonWithHoles2d parentShape; + parentShape.outer_boundary().push_back({0.0, 0.0}); + parentShape.outer_boundary().push_back({0.0, 1000.0}); + parentShape.outer_boundary().push_back({400.0, 800.0}); + parentShape.outer_boundary().push_back({1000.0, 1000.0}); + parentShape.outer_boundary().push_back({800.0, 400.0}); + parentShape.outer_boundary().push_back({1000.0, 0.0}); + parentShape.add_hole(createRegularPolygon(CgalPoint2d(200.0, 200.0), 50, 4)); + parentShape.add_hole(createRegularPolygon(CgalPoint2d(600.0, 700.0), 100, 100)); + + int numberOfVertices = 16; + double growthFactor = 1.05; + + cv::Size imgSize(1000, 1000); + const auto parentColor = randomColor(); + const auto fitColor = randomColor(); + + int N_test = 10; + for (int i = 0; i < N_test; i++) { + CgalPoint2d center = CgalPoint2d(rand() % imgSize.width, rand() % imgSize.height); + cv::Mat polygonImage(imgSize, CV_8UC3, cv::Vec3b(0, 0, 0)); + drawContour(polygonImage, parentShape, &parentColor); + drawContour(polygonImage, center, 2, &fitColor); + if (isInside(center, parentShape)) { + const auto fittedPolygon2d = growConvexPolygonInsideShape(parentShape, center, numberOfVertices, growthFactor); + drawContour(polygonImage, fittedPolygon2d, &fitColor); + } + + cv::namedWindow("TestShapeGrowing", cv::WindowFlags::WINDOW_NORMAL); + cv::imshow("TestShapeGrowing", polygonImage); + cv::waitKey(0); // Wait for a keystroke in the window + } + + return 0; +} diff --git a/plane_segmentation/grid_map_filters_rsl/CMakeLists.txt b/plane_segmentation/grid_map_filters_rsl/CMakeLists.txt new file mode 100644 index 00000000..47a9f1a4 --- /dev/null +++ b/plane_segmentation/grid_map_filters_rsl/CMakeLists.txt @@ -0,0 +1,81 @@ +cmake_minimum_required(VERSION 3.5) +project(grid_map_filters_rsl) + +# Default to C++14 +if(NOT CMAKE_CXX_STANDARD) + set(CMAKE_CXX_STANDARD 14) +endif() + +# Enable warnings +add_compile_options(-Wall -Wextra -Wpedantic) + +# Export compile commands +set(CMAKE_EXPORT_COMPILE_COMMANDS ON) + +# Find dependencies +find_package(ament_cmake REQUIRED) +find_package(grid_map_cv REQUIRED) +find_package(grid_map_core REQUIRED) +find_package(Eigen3 REQUIRED) +find_package(ament_cmake_gtest REQUIRED) +find_package(sensor_msgs REQUIRED) # Added dependency +find_package(rosidl_runtime_cpp REQUIRED) # Added dependency + +# Debugging output +message(STATUS "grid_map_cv_FOUND: ${grid_map_cv_FOUND}") +message(STATUS "grid_map_cv_INCLUDE_DIRS: ${grid_map_cv_INCLUDE_DIRS}") +message(STATUS "grid_map_cv_LIBRARIES: ${grid_map_cv_LIBRARIES}") + +# Include directories +include_directories( + include + ${grid_map_cv_INCLUDE_DIRS} + ${grid_map_core_INCLUDE_DIRS} + ${EIGEN3_INCLUDE_DIRS} + ${sensor_msgs_INCLUDE_DIRS} # Ensure sensor_msgs include dirs are added + ${rosidl_runtime_cpp_INCLUDE_DIRS} # Ensure rosidl_runtime_cpp include dirs are added +) + +# Add library +add_library(${PROJECT_NAME} SHARED + src/GridMapDerivative.cpp + src/inpainting.cpp + src/lookup.cpp + src/smoothing.cpp + src/processing.cpp +) + +target_include_directories(${PROJECT_NAME} PUBLIC + $ + $ 
+) + +# Replace target_link_libraries with ament_target_dependencies +ament_target_dependencies(${PROJECT_NAME} + grid_map_cv + grid_map_core + sensor_msgs + rosidl_runtime_cpp +) + +# Install library +install(TARGETS ${PROJECT_NAME} + EXPORT export_${PROJECT_NAME} + ARCHIVE DESTINATION lib + LIBRARY DESTINATION lib + RUNTIME DESTINATION bin +) + +# Install headers +install(DIRECTORY include/${PROJECT_NAME}/ + DESTINATION include/${PROJECT_NAME}/ +) + + +# Clang tools (optional) +# find_package(cmake_clang_tools QUIET) +# if(cmake_clang_tools_FOUND) +# add_default_clang_tooling() +# endif() + +ament_package() \ No newline at end of file diff --git a/grid_map_filters_rsl/include/grid_map_filters_rsl/GridMapDerivative.hpp b/plane_segmentation/grid_map_filters_rsl/include/grid_map_filters_rsl/GridMapDerivative.hpp similarity index 99% rename from grid_map_filters_rsl/include/grid_map_filters_rsl/GridMapDerivative.hpp rename to plane_segmentation/grid_map_filters_rsl/include/grid_map_filters_rsl/GridMapDerivative.hpp index aa814d0f..41dd3ac4 100644 --- a/grid_map_filters_rsl/include/grid_map_filters_rsl/GridMapDerivative.hpp +++ b/plane_segmentation/grid_map_filters_rsl/include/grid_map_filters_rsl/GridMapDerivative.hpp @@ -14,6 +14,7 @@ namespace grid_map { namespace derivative { + class GridMapDerivative { private: static constexpr int kernelSize_ = 5; diff --git a/grid_map_filters_rsl/include/grid_map_filters_rsl/inpainting.hpp b/plane_segmentation/grid_map_filters_rsl/include/grid_map_filters_rsl/inpainting.hpp similarity index 99% rename from grid_map_filters_rsl/include/grid_map_filters_rsl/inpainting.hpp rename to plane_segmentation/grid_map_filters_rsl/include/grid_map_filters_rsl/inpainting.hpp index 0573df4b..f8f89e5f 100644 --- a/grid_map_filters_rsl/include/grid_map_filters_rsl/inpainting.hpp +++ b/plane_segmentation/grid_map_filters_rsl/include/grid_map_filters_rsl/inpainting.hpp @@ -39,7 +39,7 @@ void biLinearInterpolation(grid_map::GridMap& map, const std::string& layerIn, c * @param layerOut output layer (filtered map is written into this layer) * @param inpaintRadius vicinity considered by inpaint filter. */ -void nonlinearInterpolation(grid_map::GridMap& map, const std::string& layerIn, const std::string& layerOut, double inpaintRadius = 0.05); +void nonlinearInterpolation(grid_map::GridMap& map, const std::string& layerIn, const std::string& layerOut, double inpaintRadius); /** * @brief Up- or down-sample elevation map (open-cv function). In-place operation only. Only the layer with name "layer" is resampled, while @@ -49,5 +49,6 @@ void nonlinearInterpolation(grid_map::GridMap& map, const std::string& layerIn, * @param newRes new resolution. 
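 * @note Example usage (assuming a 0.04 m map): grid_map::inpainting::resample(map, "elevation", 0.08); // doubles the cell size, halving the cells per side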
*/ void resample(grid_map::GridMap& map, const std::string& layer, double newRes); + } // namespace inpainting + } // namespace grid_map diff --git a/grid_map_filters_rsl/include/grid_map_filters_rsl/lookup.hpp b/plane_segmentation/grid_map_filters_rsl/include/grid_map_filters_rsl/lookup.hpp similarity index 98% rename from grid_map_filters_rsl/include/grid_map_filters_rsl/lookup.hpp rename to plane_segmentation/grid_map_filters_rsl/include/grid_map_filters_rsl/lookup.hpp index 7c91c20a..cd3bae45 100644 --- a/grid_map_filters_rsl/include/grid_map_filters_rsl/lookup.hpp +++ b/plane_segmentation/grid_map_filters_rsl/include/grid_map_filters_rsl/lookup.hpp @@ -55,4 +55,4 @@ std::vector valuesBetweenLocations(const grid_map::Position grid_map::Position projectToMapWithMargin(const grid_map::GridMap& gridMap, const grid_map::Position& position, double margin = 1e-6); } // namespace lookup -} // namespace grid_map \ No newline at end of file +} // namespace grid_map diff --git a/grid_map_filters_rsl/include/grid_map_filters_rsl/processing.hpp b/plane_segmentation/grid_map_filters_rsl/include/grid_map_filters_rsl/processing.hpp similarity index 100% rename from grid_map_filters_rsl/include/grid_map_filters_rsl/processing.hpp rename to plane_segmentation/grid_map_filters_rsl/include/grid_map_filters_rsl/processing.hpp diff --git a/plane_segmentation/grid_map_filters_rsl/include/grid_map_filters_rsl/smoothing.hpp b/plane_segmentation/grid_map_filters_rsl/include/grid_map_filters_rsl/smoothing.hpp new file mode 100644 index 00000000..99ae3e2b --- /dev/null +++ b/plane_segmentation/grid_map_filters_rsl/include/grid_map_filters_rsl/smoothing.hpp @@ -0,0 +1,50 @@ +/** + * @file smoothing.hpp + * @authors Fabian Jenelten + * @date 18.05, 2021 + * @affiliation ETH RSL + * @brief Smoothing and outlier rejection filters. + */ + +#pragma once + +// grid map. +#include + +namespace grid_map { +namespace smoothing { + +/** + * @brief Sequential median filter (open-cv function). In-place operation (layerIn = layerOut) is supported. + * @param map grid map + * @param layerIn reference layer (filter is applied wrt this layer) + * @param layerOut output layer (filtered map is written into this layer) + * @param kernelSize size of the smoothing window (must be an odd number) + * @param deltaKernelSize kernel size is increased by this value after each repeat, if numberOfRepeats > 1 + * @param numberOfRepeats number of sequentially applied median filters (approaches Gaussian blurring as the count increases) + */ +void median(grid_map::GridMap& map, const std::string& layerIn, const std::string& layerOut, int kernelSize, int deltaKernelSize = 2, + int numberOfRepeats = 1); + +/** + * @brief Sequential box blur filter (open-cv function). In-place operation (layerIn = layerOut) is supported. + * @param map grid map + * @param layerIn reference layer (filter is applied wrt this layer) + * @param layerOut output layer (filtered map is written into this layer) + * @param kernelSize size of the smoothing window (should be an odd number; an even size shifts the result by half a cell) + * @param numberOfRepeats number of sequentially applied blurring filters (approaches Gaussian blurring as the count increases) + */ +void boxBlur(grid_map::GridMap& map, const std::string& layerIn, const std::string& layerOut, int kernelSize, int numberOfRepeats = 1); + +/** + * @brief Gaussian blur filter (open-cv function). In-place operation (layerIn = layerOut) is supported.
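+ * @note Unlike median() and boxBlur(), there is no repeat parameter; increase kernelSize or sigma for stronger smoothing.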
+ * @param map grid map + * @param layerIn reference layer (filter is applied wrt this layer) + * @param layerOut output layer (filtered map is written into this layer) + * @param kernelSize size of the smoothing window (should be an odd number; an even size shifts the result by half a cell) + * @param sigma standard deviation of the Gaussian kernel + */ +void gaussianBlur(grid_map::GridMap& map, const std::string& layerIn, const std::string& layerOut, int kernelSize, double sigma); + +} // namespace smoothing +} // namespace grid_map diff --git a/plane_segmentation/grid_map_filters_rsl/package.xml b/plane_segmentation/grid_map_filters_rsl/package.xml new file mode 100644 index 00000000..786e5637 --- /dev/null +++ b/plane_segmentation/grid_map_filters_rsl/package.xml @@ -0,0 +1,38 @@ + + + grid_map_filters_rsl + 0.1.0 + Extension of grid map filters package with OpenCV filters and others + + Fabian Jenelten + Fabian Jenelten + + MIT + + + ament_cmake + + + ament_cmake + + grid_map_cv + grid_map_core + eigen + + + rosidl_runtime_cpp + + + grid_map_cv + grid_map_core + sensor_msgs + + + ament_cmake_gtest + ament_cmake_pytest + + + + ament_cmake + + \ No newline at end of file diff --git a/grid_map_filters_rsl/src/GridMapDerivative.cpp b/plane_segmentation/grid_map_filters_rsl/src/GridMapDerivative.cpp similarity index 99% rename from grid_map_filters_rsl/src/GridMapDerivative.cpp rename to plane_segmentation/grid_map_filters_rsl/src/GridMapDerivative.cpp index 2fca08e3..1796dfde 100644 --- a/grid_map_filters_rsl/src/GridMapDerivative.cpp +++ b/plane_segmentation/grid_map_filters_rsl/src/GridMapDerivative.cpp @@ -71,5 +71,6 @@ Eigen::Vector2i GridMapDerivative::getKernelCenter(const grid_map::GridMap& grid } return centerId; } + } // namespace derivative } // namespace grid_map diff --git a/grid_map_filters_rsl/src/inpainting.cpp b/plane_segmentation/grid_map_filters_rsl/src/inpainting.cpp similarity index 98% rename from grid_map_filters_rsl/src/inpainting.cpp rename to plane_segmentation/grid_map_filters_rsl/src/inpainting.cpp index c82e82b9..a3aaa19b 100644 --- a/grid_map_filters_rsl/src/inpainting.cpp +++ b/plane_segmentation/grid_map_filters_rsl/src/inpainting.cpp @@ -198,7 +198,7 @@ void biLinearInterpolation(grid_map::GridMap& map, const std::string& layerIn, c // If failed, try again. if (!success) { map.get(layerIn) = map.get(layerOut); - return nonlinearInterpolation(map, layerIn, layerOut); + return nonlinearInterpolation(map, layerIn, layerOut, 2. * map.getResolution()); } } @@ -262,7 +262,7 @@ void resample(grid_map::GridMap& map, const std::string& layer, double newRes) { for (const auto& layer_name : layer_names) { Eigen::MatrixXf elevationMap = std::move(map.get(layer_name)); - // Convert elevation map ro open-cv image. + // Convert elevation map to open-cv image.
cv::Mat elevationImage; cv::eigen2cv(elevationMap, elevationImage); diff --git a/grid_map_filters_rsl/src/lookup.cpp b/plane_segmentation/grid_map_filters_rsl/src/lookup.cpp similarity index 99% rename from grid_map_filters_rsl/src/lookup.cpp rename to plane_segmentation/grid_map_filters_rsl/src/lookup.cpp index f6759902..61409e7a 100644 --- a/grid_map_filters_rsl/src/lookup.cpp +++ b/plane_segmentation/grid_map_filters_rsl/src/lookup.cpp @@ -103,4 +103,4 @@ grid_map::Position projectToMapWithMargin(const grid_map::GridMap& gridMap, cons } } // namespace lookup -} // namespace grid_map \ No newline at end of file +} // namespace grid_map diff --git a/grid_map_filters_rsl/src/processing.cpp b/plane_segmentation/grid_map_filters_rsl/src/processing.cpp similarity index 99% rename from grid_map_filters_rsl/src/processing.cpp rename to plane_segmentation/grid_map_filters_rsl/src/processing.cpp index 2535feb5..1249937e 100644 --- a/grid_map_filters_rsl/src/processing.cpp +++ b/plane_segmentation/grid_map_filters_rsl/src/processing.cpp @@ -8,7 +8,6 @@ // grid map filters rsl. #include -#include namespace grid_map { namespace processing { diff --git a/plane_segmentation/grid_map_filters_rsl/src/smoothing.cpp b/plane_segmentation/grid_map_filters_rsl/src/smoothing.cpp new file mode 100644 index 00000000..24e89e04 --- /dev/null +++ b/plane_segmentation/grid_map_filters_rsl/src/smoothing.cpp @@ -0,0 +1,109 @@ +/** + * @file smoothing.cpp + * @authors Fabian Jenelten + * @date 18.05, 2021 + * @affiliation ETH RSL + * @brief Smoothing and outlier rejection filters. + */ + +// grid map filters rsl. +#include + +// open cv. +#include +#include +#include +#include +#include +#include + +namespace grid_map { +namespace smoothing { + +void median(grid_map::GridMap& map, const std::string& layerIn, const std::string& layerOut, int kernelSize, int deltaKernelSize, + int numberOfRepeats) { + // Create new layer if missing. + if (!map.exists(layerOut)) { + map.add(layerOut); + } + + if (kernelSize + deltaKernelSize * (numberOfRepeats - 1) <= 5) { + // Convert to image. + cv::Mat elevationImage; + cv::eigen2cv(map.get(layerIn), elevationImage); + + for (auto iter = 0; iter < numberOfRepeats; ++iter) { + cv::medianBlur(elevationImage, elevationImage, kernelSize); + kernelSize += deltaKernelSize; + } + + // Set output layer. + cv::cv2eigen(elevationImage, map.get(layerOut)); + } + + // For kernels larger than 5, cv::medianBlur only supports 8-bit images, so go through a scaled 8-bit encoding. + else { + // Reference to in map. + const grid_map::Matrix& H_in = map.get(layerIn); + + // Convert grid map to image. + cv::Mat elevationImage; + const float minValue = H_in.minCoeffOfFinites(); + const float maxValue = H_in.maxCoeffOfFinites(); + grid_map::GridMapCvConverter::toImage(map, layerIn, CV_8UC1, minValue, maxValue, elevationImage); + + for (auto iter = 0; iter < numberOfRepeats; ++iter) { + cv::medianBlur(elevationImage, elevationImage, kernelSize); + kernelSize += deltaKernelSize; + } + + // Get image as float. + cv::Mat elevationImageFloat; + constexpr float maxUCharValue = 255.F; + elevationImage.convertTo(elevationImageFloat, CV_32F, (maxValue - minValue) / maxUCharValue, minValue); + + // Convert back to grid map. + cv::cv2eigen(elevationImageFloat, map.get(layerOut)); + } +} + +void boxBlur(grid_map::GridMap& map, const std::string& layerIn, const std::string& layerOut, int kernelSize, int numberOfRepeats) { + // Create new layer if missing. + if (!map.exists(layerOut)) { + map.add(layerOut); + } + + // Convert to image.
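+ // (cv::eigen2cv copies the Eigen layer into a single-channel CV_32F image that OpenCV can filter directly.)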
+  cv::Mat elevationImage;
+  cv::eigen2cv(map.get(layerIn), elevationImage);
+
+  // Box blur.
+  cv::Size kernelSize2D(kernelSize, kernelSize);
+  for (auto iter = 0; iter < numberOfRepeats; ++iter) {
+    cv::blur(elevationImage, elevationImage, kernelSize2D);
+  }
+
+  // Set output layer.
+  cv::cv2eigen(elevationImage, map.get(layerOut));
+}
+
+void gaussianBlur(grid_map::GridMap& map, const std::string& layerIn, const std::string& layerOut, int kernelSize, double sigma) {
+  // Create new layer if missing.
+  if (!map.exists(layerOut)) {
+    map.add(layerOut);
+  }
+
+  // Convert to image.
+  cv::Mat elevationImage;
+  cv::eigen2cv(map.get(layerIn), elevationImage);
+
+  // Gaussian blur.
+  cv::Size kernelSize2D(kernelSize, kernelSize);
+  cv::GaussianBlur(elevationImage, elevationImage, kernelSize2D, sigma);
+
+  // Set output layer.
+  cv::cv2eigen(elevationImage, map.get(layerOut));
+}
+
+}  // namespace smoothing
+}  // namespace grid_map
diff --git a/grid_map_filters_rsl/test/TestDerivativeFilter.cpp b/plane_segmentation/grid_map_filters_rsl/test/TestDerivativeFilter.cpp
similarity index 99%
rename from grid_map_filters_rsl/test/TestDerivativeFilter.cpp
rename to plane_segmentation/grid_map_filters_rsl/test/TestDerivativeFilter.cpp
index d4026c9c..6f04270a 100644
--- a/grid_map_filters_rsl/test/TestDerivativeFilter.cpp
+++ b/plane_segmentation/grid_map_filters_rsl/test/TestDerivativeFilter.cpp
@@ -36,4 +36,4 @@ TEST(TestGridMapDerivative, initialization) {  // NOLINT
   }
   EXPECT_TRUE(iterator.isPastEnd());
-}
\ No newline at end of file
+}
diff --git a/grid_map_filters_rsl/test/TestFilters.cpp b/plane_segmentation/grid_map_filters_rsl/test/TestFilters.cpp
similarity index 51%
rename from grid_map_filters_rsl/test/TestFilters.cpp
rename to plane_segmentation/grid_map_filters_rsl/test/TestFilters.cpp
index 6d83fa75..c090a1e6 100644
--- a/grid_map_filters_rsl/test/TestFilters.cpp
+++ b/plane_segmentation/grid_map_filters_rsl/test/TestFilters.cpp
@@ -74,4 +74,71 @@ TEST(TestInpainting, minValuesOnlyNaN) {  // NOLINT
   EXPECT_TRUE(map.get("filled_min_nan").hasNaN());
   EXPECT_DOUBLE_EQ(map.get("filled_min_nonan").minCoeff(), 1.0);
+}
+
+TEST(TestResampling, resampleSameSize) {  // NOLINT
+  const std::string layerName = "layer";
+
+  GridMap map;
+  map.setGeometry(Length(3.0, 2.01), 0.33, Position(0.1, 0.2));
+  map.add(layerName);
+  map.get(layerName).setRandom();
+
+  GridMap resampleMap = map;
+
+  inpainting::resample(resampleMap, layerName, map.getResolution());
+
+  // Compare geometry
+  EXPECT_TRUE(resampleMap.getSize().isApprox(map.getSize()));
+  EXPECT_TRUE(resampleMap.getPosition().isApprox(map.getPosition()));
+  EXPECT_DOUBLE_EQ(resampleMap.getResolution(), map.getResolution());
+
+  // Compare content
+  EXPECT_TRUE(resampleMap.get(layerName).isApprox(map.get(layerName)));
+}
+
+TEST(TestResampling, resampleUpsample) {  // NOLINT
+  const std::string layerName = "layer";
+  const double oldRes = 1.0;
+  const double newRes = 0.5;
+
+  GridMap map;
+  map.setGeometry(Length(3.0, 2.0), oldRes, Position(0.1, 0.2));
+  map.add(layerName);
+  map.get(layerName).setRandom();
+
+  GridMap resampleMap = map;
+
+  inpainting::resample(resampleMap, layerName, newRes);
+
+  // Compare geometry
+  const Eigen::Vector2d oldTrueSize(map.getResolution() * map.getSize().x(), map.getResolution() * map.getSize().y());
+  const Eigen::Vector2d newTrueSize(resampleMap.getResolution() * resampleMap.getSize().x(),
+                                    resampleMap.getResolution() * resampleMap.getSize().y());
+  EXPECT_TRUE(newTrueSize.isApprox(oldTrueSize));
+
EXPECT_TRUE(resampleMap.getPosition().isApprox(map.getPosition())); + EXPECT_DOUBLE_EQ(resampleMap.getResolution(), newRes); +} + +TEST(TestResampling, resampleDownsample) { // NOLINT + const std::string layerName = "layer"; + const double oldRes = 0.5; + const double newRes = 1.0; + + GridMap map; + map.setGeometry(Length(3.0, 2.0), oldRes, Position(0.1, 0.2)); + map.add(layerName); + map.get(layerName).setRandom(); + + GridMap resampleMap = map; + + inpainting::resample(resampleMap, layerName, newRes); + + // Compare geometry + const Eigen::Vector2d oldTrueSize(map.getResolution() * map.getSize().x(), map.getResolution() * map.getSize().y()); + const Eigen::Vector2d newTrueSize(resampleMap.getResolution() * resampleMap.getSize().x(), + resampleMap.getResolution() * resampleMap.getSize().y()); + EXPECT_TRUE(newTrueSize.isApprox(oldTrueSize)); + EXPECT_TRUE(resampleMap.getPosition().isApprox(map.getPosition())); + EXPECT_DOUBLE_EQ(resampleMap.getResolution(), newRes); } \ No newline at end of file diff --git a/grid_map_filters_rsl/test/TestLookup.cpp b/plane_segmentation/grid_map_filters_rsl/test/TestLookup.cpp similarity index 99% rename from grid_map_filters_rsl/test/TestLookup.cpp rename to plane_segmentation/grid_map_filters_rsl/test/TestLookup.cpp index 0472b732..b85fb6d3 100644 --- a/grid_map_filters_rsl/test/TestLookup.cpp +++ b/plane_segmentation/grid_map_filters_rsl/test/TestLookup.cpp @@ -79,4 +79,4 @@ TEST(TestLookup, maxValue_onlyNaN) { // NOLINT const grid_map::Position position2(0.3, 0.4); const auto result = lookup::maxValueBetweenLocations(position1, position2, map, data); ASSERT_FALSE(result.isValid); -} \ No newline at end of file +} diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 00000000..fa3f3767 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,14 @@ +###### Requirements without Version Specifiers ######` +numpy +scipy==1.7 +ruamel.yaml +opencv-python +simple-parsing +scikit-image==0.19 +matplotlib +catkin-tools +networkx==3.0 +# cupy + +###### Requirements with Version Specifiers ######` +shapely==1.7.1 diff --git a/sensor_processing/debug_publisher/image_debug_node.py b/sensor_processing/debug_publisher/image_debug_node.py new file mode 100644 index 00000000..6d625a68 --- /dev/null +++ b/sensor_processing/debug_publisher/image_debug_node.py @@ -0,0 +1,38 @@ +import rospy +import sys + +import numpy as np +import ros_numpy +import matplotlib.pyplot as plt +from skimage.io import imshow + +import message_filters +from sensor_msgs.msg import Image, CameraInfo +from sensor_msgs.msg import PointCloud2, Image +from cv_bridge import CvBridge + + +class ImageDebugNode: + def __init__(self): + # setup pointcloud creation + self.cv_bridge = CvBridge() + # rospy.Subscriber("", CameraInfo, self.cam_info_callback) + rospy.Subscriber("/zed2i/zed_node/right/image_rect_color", Image, self.image_callback) + self.debug_pub = rospy.Publisher("/debug_image", Image, queue_size=2) + + def image_callback(self, rgb_msg): + img = self.cv_bridge.imgmsg_to_cv2(rgb_msg, desired_encoding="rgb8") + out = np.zeros((img.shape[0], img.shape[1]), dtype=np.uint16) + out[:, : int(img.shape[1] / 2)] = 1 + seg_msg = self.cv_bridge.cv2_to_imgmsg(out, encoding="mono16") + seg_msg.header = rgb_msg.header + self.debug_pub.publish(seg_msg) + # seg_msg.header.frame_id = self.header.frame_id + # self.seg_pub.publish(seg_msg) + + +if __name__ == "__main__": + rospy.init_node("image_debug_node", anonymous=True, log_level=rospy.INFO) + print("Start Debug Image Node") + node = 
ImageDebugNode() + rospy.spin() diff --git a/sensor_processing/semantic_sensor/config/sensor_parameter.yaml b/sensor_processing/semantic_sensor/config/sensor_parameter.yaml new file mode 100644 index 00000000..81fde9b1 --- /dev/null +++ b/sensor_processing/semantic_sensor/config/sensor_parameter.yaml @@ -0,0 +1,31 @@ +front_cam_pointcloud: + channels: ['rgb', 'chair','sofa',"person" ] + fusion: ['color','class_average','class_average','class_average'] + topic_name: 'front_camera/semantic_pointcloud' + semantic_segmentation: True + publish_segmentation_image: True + segmentation_model: 'lraspp_mobilenet_v3_large' # detectron_coco_panoptic_fpn_R_101_3x, lraspp_mobilenet_v3_large + show_label_legend: False + data_type: pointcloud + + cam_info_topic: "camera/depth/camera_info" + image_topic: "camera/rgb/image_raw" + depth_topic: "camera/depth/image_raw" + cam_frame: "camera_rgb_optical_frame" + +front_cam_image: + channels: ['chair','sofa',"person"] + fusion_methods: ['exponential','exponential','exponential'] + publish_topic: 'semantic_image' + publish_image_topic: "semantic_image_debug" + publish_camera_info_topic: 'semantic_image_info' + publish_fusion_info_topic: 'semantic_image_fusion_info' + data_type: image + + semantic_segmentation: True + feature_extractor: True + segmentation_model: 'lraspp_mobilenet_v3_large' # detectron_coco_panoptic_fpn_R_101_3x, lraspp_mobilenet_v3_large + show_label_legend: False + image_topic: "camera/rgb/image_raw" + camera_info_topic: "camera/depth/camera_info" + resize: 0.5 \ No newline at end of file diff --git a/sensor_processing/semantic_sensor/launch/semantic_image.launch b/sensor_processing/semantic_sensor/launch/semantic_image.launch new file mode 100644 index 00000000..8b6c01a1 --- /dev/null +++ b/sensor_processing/semantic_sensor/launch/semantic_image.launch @@ -0,0 +1,7 @@ + + + + + + \ No newline at end of file diff --git a/sensor_processing/semantic_sensor/launch/semantic_image.launch.py b/sensor_processing/semantic_sensor/launch/semantic_image.launch.py new file mode 100644 index 00000000..32a190fd --- /dev/null +++ b/sensor_processing/semantic_sensor/launch/semantic_image.launch.py @@ -0,0 +1,14 @@ +from launch import LaunchDescription +from launch_ros.actions import Node + +def generate_launch_description(): + return LaunchDescription([ + Node( + package='semantic_sensor', + executable='image_node', + name='semantic_image', + arguments=['front_cam_image'], + output='screen', + parameters=[{'ros__parameters': 'config/sensor_parameter.yaml'}] + ) + ]) \ No newline at end of file diff --git a/sensor_processing/semantic_sensor/launch/semantic_pointcloud.launch b/sensor_processing/semantic_sensor/launch/semantic_pointcloud.launch new file mode 100644 index 00000000..550638ff --- /dev/null +++ b/sensor_processing/semantic_sensor/launch/semantic_pointcloud.launch @@ -0,0 +1,7 @@ + + + + + + \ No newline at end of file diff --git a/sensor_processing/semantic_sensor/launch/semantic_pointcloud.launch.py b/sensor_processing/semantic_sensor/launch/semantic_pointcloud.launch.py new file mode 100644 index 00000000..1ca06105 --- /dev/null +++ b/sensor_processing/semantic_sensor/launch/semantic_pointcloud.launch.py @@ -0,0 +1,14 @@ +from launch import LaunchDescription +from launch_ros.actions import Node + +def generate_launch_description(): + return LaunchDescription([ + Node( + package='semantic_sensor', + executable='pointcloud_node', + name='semantic_pointcloud', + arguments=['front_cam'], + output='screen', + parameters=[{'ros__parameters': 
'config/sensor_parameter.yaml'}]
+        )
+    ])
\ No newline at end of file
diff --git a/sensor_processing/semantic_sensor/package.xml b/sensor_processing/semantic_sensor/package.xml
new file mode 100644
index 00000000..e4b66f31
--- /dev/null
+++ b/sensor_processing/semantic_sensor/package.xml
@@ -0,0 +1,25 @@
+<?xml version="1.0"?>
+<package format="3">
+  <name>semantic_sensor</name>
+  <version>0.0.0</version>
+  <description>The semantic_sensor package</description>
+  <maintainer>Gian Erni</maintainer>
+
+  <license>MIT</license>
+
+  <url>https://github.com/leggedrobotics/elevation_mapping_semantic_cupy</url>
+
+  <exec_depend>rclpy</exec_depend>
+  <exec_depend>tf2_ros</exec_depend>
+  <exec_depend>sensor_msgs</exec_depend>
+  <exec_depend>std_msgs</exec_depend>
+  <exec_depend>geometry_msgs</exec_depend>
+
+  <exec_depend>python3</exec_depend>
+
+  <export>
+    <build_type>ament_python</build_type>
+  </export>
+</package>
\ No newline at end of file
diff --git a/sensor_processing/semantic_sensor/resource/semantic_sensor b/sensor_processing/semantic_sensor/resource/semantic_sensor
new file mode 100644
index 00000000..e69de29b
diff --git a/sensor_processing/semantic_sensor/semantic_sensor/DINO/__init__.py b/sensor_processing/semantic_sensor/semantic_sensor/DINO/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/sensor_processing/semantic_sensor/semantic_sensor/DINO/modules.py b/sensor_processing/semantic_sensor/semantic_sensor/DINO/modules.py
new file mode 100644
index 00000000..24e4bff9
--- /dev/null
+++ b/sensor_processing/semantic_sensor/semantic_sensor/DINO/modules.py
@@ -0,0 +1,126 @@
+import semantic_sensor.DINO.vision_transformer as vits
+import torch.nn as nn
+import torch
+
+
+class DinoFeaturizer(nn.Module):
+    def __init__(self, weights, cfg):
+        super().__init__()
+        self.cfg = cfg
+        self.dim = cfg.dim
+
+        self.feat_type = self.cfg.dino_feat_type
+        arch = self.cfg.model
+        self.model = vits.__dict__[arch](patch_size=self.cfg.patch_size, num_classes=0)
+        for p in self.model.parameters():
+            p.requires_grad = False
+        self.model.eval().cuda()
+        self.dropout = torch.nn.Dropout2d(p=0.1)
+
+        if arch == "vit_small" and self.cfg.patch_size == 16:
+            url = "dino_deitsmall16_pretrain/dino_deitsmall16_pretrain.pth"
+        elif arch == "vit_small" and self.cfg.patch_size == 8:
+            url = "dino_deitsmall8_300ep_pretrain/dino_deitsmall8_300ep_pretrain.pth"
+        elif arch == "vit_base" and self.cfg.patch_size == 16:
+            url = "dino_vitbase16_pretrain/dino_vitbase16_pretrain.pth"
+        elif arch == "vit_base" and self.cfg.patch_size == 8:
+            url = "dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth"
+        else:
+            raise ValueError("Unknown arch and patch size")
+
+        state_dict = torch.hub.load_state_dict_from_url(url="https://dl.fbaipublicfiles.com/dino/" + url)
+
+        self.model.load_state_dict(state_dict, strict=True)
+
+        if arch == "vit_small":
+            self.n_feats = 384
+        else:
+            self.n_feats = 768
+        self.cluster1 = self.make_clusterer(self.n_feats)
+        self.proj_type = cfg.projection_type
+        if self.proj_type == "nonlinear":
+            self.cluster2 = self.make_nonlinear_clusterer(self.n_feats)
+
+    def make_clusterer(self, in_channels):
+        """
+        Function to make the clusterer model, which consists of a single Conv2d layer.
+        The input channels are taken as the argument, and the output channels are equal to the `dim` of the model.
+
+        Parameters:
+            in_channels (int): The number of input channels.
+
+        Returns:
+            nn.Sequential: A sequential model consisting of a single Conv2d layer.
+        """
+        return torch.nn.Sequential(torch.nn.Conv2d(in_channels, self.dim, (1, 1)))
+
+    def make_nonlinear_clusterer(self, in_channels):
+        """
+        Function to make the nonlinear clusterer model, which consists of a series of Conv2d and ReLU layers.
+        The input channels are taken as the argument, and the output channels are equal to the `dim` of the model.
+
+        Parameters:
+            in_channels (int): The number of input channels.
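
        Example (an illustrative sketch, not part of the original API docs; 384
        input channels correspond to vit_small features and dim = 10 to the
        default FeatureExtractorParameter):

            import torch
            # a 1x1 convolution acts as a per-pixel linear projection of the
            # patch features, here from 384 DINO channels down to 10 code channels
            head = torch.nn.Sequential(torch.nn.Conv2d(384, 10, (1, 1)))
            feats = torch.randn(1, 384, 20, 40)  # (B, n_feats, H/patch, W/patch)
            codes = head(feats)                  # -> (1, 10, 20, 40)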
+ + Returns: + nn.Sequential: A sequential model consisting of a Conv2d layer, a ReLU layer, and another Conv2d layer. + """ + return torch.nn.Sequential( + torch.nn.Conv2d(in_channels, in_channels, (1, 1)), + torch.nn.ReLU(), + torch.nn.Conv2d(in_channels, self.dim, (1, 1)), + ) + + def forward(self, img, n=1, return_class_feat=False): + """ + Forward pass of the model. + The input image is passed through the `model` to get the intermediate features. + The intermediate features are then processed to get the final image feature and code. + If `return_class_feat` is set to True, the class features are returned instead. + + Parameters: + img (torch.Tensor): The input image tensor. + n (int, optional): The number of intermediate features to get. Default value is 1. 1 means only the features of the last block. + return_class_feat (bool, optional): Whether to return the class features. Default value is False. + + Returns: + tuple: If `cfg.dropout` is True, a tuple containing the final image feature and code is returned. + Otherwise, only the final image feature is returned. + If `return_class_feat` is True, the class features are returned instead. + """ + self.model.eval() + with torch.no_grad(): + assert img.shape[2] % self.cfg.patch_size == 0 + assert img.shape[3] % self.cfg.patch_size == 0 + + # get selected layer activations + feat, attn, qkv = self.model.get_intermediate_feat(img, n=n) + feat, attn, qkv = feat[0], attn[0], qkv[0] + + feat_h = img.shape[2] // self.cfg.patch_size + feat_w = img.shape[3] // self.cfg.patch_size + + if self.feat_type == "feat": + # deflatten + image_feat = feat[:, 1:, :].reshape(feat.shape[0], feat_h, feat_w, -1).permute(0, 3, 1, 2) + elif self.feat_type == "KK": + image_k = qkv[1, :, :, 1:, :].reshape(feat.shape[0], 6, feat_h, feat_w, -1) + B, H, I, J, D = image_k.shape + image_feat = image_k.permute(0, 1, 4, 2, 3).reshape(B, H * D, I, J) + else: + raise ValueError("Unknown feat type:{}".format(self.feat_type)) + + if return_class_feat: + return feat[:, :1, :].reshape(feat.shape[0], 1, 1, -1).permute(0, 3, 1, 2) + + if self.proj_type is not None: + code = self.cluster1(self.dropout(image_feat)) + if self.proj_type == "nonlinear": + code += self.cluster2(self.dropout(image_feat)) + else: + code = image_feat + + if self.cfg.dropout: + return self.dropout(image_feat), code + else: + return image_feat, code diff --git a/sensor_processing/semantic_sensor/semantic_sensor/DINO/utils.py b/sensor_processing/semantic_sensor/semantic_sensor/DINO/utils.py new file mode 100644 index 00000000..1bc84057 --- /dev/null +++ b/sensor_processing/semantic_sensor/semantic_sensor/DINO/utils.py @@ -0,0 +1,46 @@ +import torch +import warnings +import math + + +def _no_grad_trunc_normal_(tensor, mean, std, a, b): + # Cut & paste from PyTorch official master until it's in a few official releases - RW + # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf + def norm_cdf(x): + # Computes standard normal cumulative distribution function + return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0 + + if (mean < a - 2 * std) or (mean > b + 2 * std): + warnings.warn( + "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. " + "The distribution of values may be incorrect.", + stacklevel=2, + ) + + with torch.no_grad(): + # Values are generated by using a truncated uniform distribution and + # then using the inverse CDF for the normal distribution. 
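    # A worked numeric example of these bounds (illustrative only): with
    # mean = 0, std = 1, a = -2, b = 2, we get l = norm_cdf(-2) ~= 0.0228 and
    # u = norm_cdf(2) ~= 0.9772, so the uniform draw below lands in
    # [2l - 1, 2u - 1] ~= [-0.954, 0.954], and erfinv then maps it back onto a
    # standard normal truncated to roughly [-2, 2].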
+ # Get upper and lower cdf values + l = norm_cdf((a - mean) / std) + u = norm_cdf((b - mean) / std) + + # Uniformly fill tensor with values from [l, u], then translate to + # [2l-1, 2u-1]. + tensor.uniform_(2 * l - 1, 2 * u - 1) + + # Use inverse cdf transform for normal distribution to get truncated + # standard normal + tensor.erfinv_() + + # Transform to proper mean, std + tensor.mul_(std * math.sqrt(2.0)) + tensor.add_(mean) + + # Clamp to ensure it's in the proper range + tensor.clamp_(min=a, max=b) + return tensor + + +def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0): + # type: (Tensor, float, float, float, float) -> Tensor + return _no_grad_trunc_normal_(tensor, mean, std, a, b) diff --git a/sensor_processing/semantic_sensor/semantic_sensor/DINO/vision_transformer.py b/sensor_processing/semantic_sensor/semantic_sensor/DINO/vision_transformer.py new file mode 100644 index 00000000..53abd17a --- /dev/null +++ b/sensor_processing/semantic_sensor/semantic_sensor/DINO/vision_transformer.py @@ -0,0 +1,385 @@ +""" +Mostly copy-paste from timm library. +https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py +""" +import math +from functools import partial + +import torch +import torch.nn as nn +from semantic_sensor.DINO.utils import trunc_normal_ + + +def drop_path(x, drop_prob: float = 0.0, training: bool = False): + if drop_prob == 0.0 or not training: + return x + keep_prob = 1 - drop_prob + shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets + random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device) + random_tensor.floor_() # binarize + output = x.div(keep_prob) * random_tensor + return output + + +class DropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" + + def __init__(self, drop_prob=None): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training) + + +class Mlp(nn.Module): + def __init__( + self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0, + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(nn.Module): + def __init__( + self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0, + ): + super().__init__() + # number of attention heads + self.num_heads = num_heads + # dimension of each head + head_dim = dim // num_heads + # scale factor for the attention scores + self.scale = qk_scale or head_dim ** -0.5 + + # linear layer to project the input to 3 times the dimension + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + # dropout layer to drop out activations in the attention mechanism + self.attn_drop = nn.Dropout(attn_drop) + # linear layer to project the output back to the original dimension + self.proj = nn.Linear(dim, dim) + # dropout layer to drop out activations in the projection layer + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x, return_qkv=False): + # get the batch size, sequence length, and input dimension + B, N, C = x.shape + # project the input to 3 
times the dimension, multiplication with W matrix
+        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
+        # separate the input into queries: info about word in relation to others, keys: how one word influences others, and values: how word sees itself
+        q, k, v = qkv[0], qkv[1], qkv[2]
+
+        # compute the attention scores
+        attn = (q @ k.transpose(-2, -1)) * self.scale
+        attn = attn.softmax(dim=-1)
+        # drop out activations in the attention mechanism
+        attn = self.attn_drop(attn)
+
+        # compute the weighted sum of values
+        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
+        # project the output back to the original dimension
+        x = self.proj(x)
+        # drop out activations in the projection layer
+        x = self.proj_drop(x)
+
+        # return the projected output, the attention map and the raw qkv tensor;
+        # all three are always returned so that callers can unpack them uniformly
+        # (the return_qkv flag is kept for interface compatibility)
+        return x, attn, qkv
+
+
+class Block(nn.Module):
+    def __init__(
+        self,
+        dim,
+        num_heads,
+        mlp_ratio=4.0,
+        qkv_bias=False,
+        qk_scale=None,
+        drop=0.0,
+        attn_drop=0.0,
+        drop_path=0.0,
+        act_layer=nn.GELU,
+        norm_layer=nn.LayerNorm,
+    ):
+        super().__init__()
+        self.norm1 = norm_layer(dim)
+        self.attn = Attention(
+            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop,
+        )
+        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
+        self.norm2 = norm_layer(dim)
+        mlp_hidden_dim = int(dim * mlp_ratio)
+        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop,)
+
+    def forward(self, x, return_attention=False, return_qkv=False):
+        y, attn, qkv = self.attn(self.norm1(x))
+        if return_attention:
+            return attn
+        x = x + self.drop_path(y)
+        x = x + self.drop_path(self.mlp(self.norm2(x)))
+        if return_qkv:
+            return x, attn, qkv
+        return x
+
+
+class PatchEmbed(nn.Module):
+    """Image to Patch Embedding"""
+
+    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
+        super().__init__()
+        num_patches = (img_size // patch_size) * (img_size // patch_size)
+        self.img_size = img_size
+        self.patch_size = patch_size
+        self.num_patches = num_patches
+
+        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
+
+    def forward(self, x):
+        B, C, H, W = x.shape
+        x = self.proj(x).flatten(2).transpose(1, 2)
+        return x
+
+
+class VisionTransformer(nn.Module):
+    """Vision Transformer"""
+
+    def __init__(
+        self,
+        img_size=[224],
+        patch_size=16,
+        in_chans=3,
+        num_classes=0,
+        embed_dim=768,
+        depth=12,
+        num_heads=12,
+        mlp_ratio=4.0,
+        qkv_bias=False,
+        qk_scale=None,
+        drop_rate=0.0,
+        attn_drop_rate=0.0,
+        drop_path_rate=0.0,
+        norm_layer=nn.LayerNorm,
+        **kwargs
+    ):
+        super().__init__()
+
+        self.num_features = self.embed_dim = embed_dim
+
+        self.patch_embed = PatchEmbed(
+            img_size=img_size[0], patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
+        )
+        num_patches = self.patch_embed.num_patches
+
+        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
+        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
+        self.pos_drop = nn.Dropout(p=drop_rate)
+
+        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
+        self.blocks = nn.ModuleList(
+            [
+                Block(
+                    dim=embed_dim,
+                    num_heads=num_heads,
+                    mlp_ratio=mlp_ratio,
+                    qkv_bias=qkv_bias,
+                    qk_scale=qk_scale,
+                    drop=drop_rate,
+                    attn_drop=attn_drop_rate,
+                    drop_path=dpr[i],
+                    norm_layer=norm_layer,
+                )
+                for i in range(depth)
+            ]
+        )
+        self.norm = norm_layer(embed_dim)
+
+        # Classifier head
+        self.head =
nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + trunc_normal_(self.pos_embed, std=0.02) + trunc_normal_(self.cls_token, std=0.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=0.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def interpolate_pos_encoding(self, x, w, h): + npatch = x.shape[1] - 1 + N = self.pos_embed.shape[1] - 1 + if npatch == N and w == h: + return self.pos_embed + class_pos_embed = self.pos_embed[:, 0] + patch_pos_embed = self.pos_embed[:, 1:] + dim = x.shape[-1] + w0 = w // self.patch_embed.patch_size + h0 = h // self.patch_embed.patch_size + # we add a small number to avoid floating point error in the interpolation + # see discussion at https://github.com/facebookresearch/dino/issues/8 + w0, h0 = w0 + 0.1, h0 + 0.1 + patch_pos_embed = nn.functional.interpolate( + patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2), + scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)), + mode="bicubic", + ) + assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1] + patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) + return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1) + + def prepare_tokens(self, x): + B, nc, w, h = x.shape + x = self.patch_embed(x) # patch linear embedding + + # add the [CLS] token to the embed patch tokens + cls_tokens = self.cls_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + + # add positional encoding to each token + x = x + self.interpolate_pos_encoding(x, w, h) + + return self.pos_drop(x) + + def forward(self, x): + x = self.prepare_tokens(x) + for blk in self.blocks: + x = blk(x) + x = self.norm(x) + return x[:, 0] + + def forward_feats(self, x): + x = self.prepare_tokens(x) + for blk in self.blocks: + x = blk(x) + x = self.norm(x) + return x + + def get_intermediate_feat(self, x, n=1): + x = self.prepare_tokens(x) + # we return the output tokens from the `n` last blocks + feat = [] + attns = [] + qkvs = [] + for i, blk in enumerate(self.blocks): + x, attn, qkv = blk(x, return_qkv=True) + if len(self.blocks) - i <= n: + feat.append(self.norm(x)) + qkvs.append(qkv) + attns.append(attn) + return feat, attns, qkvs + + def get_last_selfattention(self, x): + x = self.prepare_tokens(x) + for i, blk in enumerate(self.blocks): + if i < len(self.blocks) - 1: + x = blk(x) + else: + # return attention of the last block + return blk(x, return_attention=True) + + def get_intermediate_layers(self, x, n=1): + x = self.prepare_tokens(x) + # we return the output tokens from the `n` last blocks + output = [] + for i, blk in enumerate(self.blocks): + x = blk(x) + if len(self.blocks) - i <= n: + output.append(self.norm(x)) + return output + + +def vit_tiny(patch_size=16, **kwargs): + model = VisionTransformer( + patch_size=patch_size, + embed_dim=192, + depth=12, + num_heads=3, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs + ) + return model + + +def vit_small(patch_size=16, **kwargs): + model = VisionTransformer( + patch_size=patch_size, + embed_dim=384, + depth=12, + num_heads=6, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs + ) + return model + + +def vit_base(patch_size=16, **kwargs): + model = VisionTransformer( + 
patch_size=patch_size, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs + ) + return model + + +class DINOHead(nn.Module): + def __init__( + self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256, + ): + super().__init__() + nlayers = max(nlayers, 1) + if nlayers == 1: + self.mlp = nn.Linear(in_dim, bottleneck_dim) + else: + layers = [nn.Linear(in_dim, hidden_dim)] + if use_bn: + layers.append(nn.BatchNorm1d(hidden_dim)) + layers.append(nn.GELU()) + for _ in range(nlayers - 2): + layers.append(nn.Linear(hidden_dim, hidden_dim)) + if use_bn: + layers.append(nn.BatchNorm1d(hidden_dim)) + layers.append(nn.GELU()) + layers.append(nn.Linear(hidden_dim, bottleneck_dim)) + self.mlp = nn.Sequential(*layers) + self.apply(self._init_weights) + self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False)) + self.last_layer.weight_g.data.fill_(1) + if norm_last_layer: + self.last_layer.weight_g.requires_grad = False + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=0.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + + def forward(self, x): + x = self.mlp(x) + x = nn.functional.normalize(x, dim=-1, p=2) + x = self.last_layer(x) + return x diff --git a/sensor_processing/semantic_sensor/semantic_sensor/__init__.py b/sensor_processing/semantic_sensor/semantic_sensor/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/sensor_processing/semantic_sensor/semantic_sensor/image_node.py b/sensor_processing/semantic_sensor/semantic_sensor/image_node.py new file mode 100755 index 00000000..5b22341f --- /dev/null +++ b/sensor_processing/semantic_sensor/semantic_sensor/image_node.py @@ -0,0 +1,296 @@ +#!/usr/bin/env python + +import rospy +import sys + +import numpy as np +import cupy as cp +import cv2 + +np.float = np.float64 # temp fix for following import suggested at https://github.com/eric-wieser/ros_numpy/issues/37 +np.bool = np.bool_ +import ros_numpy +import matplotlib.pyplot as plt +from skimage.io import imshow + +import message_filters +from sensor_msgs.msg import Image, CameraInfo +from sensor_msgs.msg import PointCloud2, Image, CompressedImage +from cv_bridge import CvBridge + +from semantic_sensor.image_parameters import ImageParameter +from semantic_sensor.networks import resolve_model +from sklearn.decomposition import PCA + +from elevation_map_msgs.msg import ChannelInfo + + +class SemanticSegmentationNode: + def __init__(self, sensor_name): + """Get parameter from server, initialize variables and semantics, register publishers and subscribers. + + Args: + sensor_name (str): Name of the sensor in the ros param server. 
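
        Example (a minimal sketch; assumes a `front_cam_image` block from
        sensor_parameter.yaml has been loaded into this node's namespace):

            rospy.init_node("semantic_segmentation_node")
            node = SemanticSegmentationNode("front_cam_image")
            rospy.spin()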
+ """ + # TODO: if this is going to be loaded from another package we might need to change namespace + self.param: ImageParameter = ImageParameter() + self.param.feature_config.input_size = [80, 160] + namespace = rospy.get_name() + if rospy.has_param(namespace): + config = rospy.get_param(namespace) + self.param: ImageParameter = ImageParameter.from_dict(config[sensor_name]) + else: + print("NO ROS ENV found.") + + print("--------------Pointcloud Parameters-------------------") + print(self.param.dumps_yaml()) + print("--------------End of Parameters-----------------------") + self.semseg_color_map = None + # setup custom dtype + # setup semantics + self.feature_extractor = None + self.semantic_model = None + self.initialize_semantics() + + # setup pointcloud creation + self.cv_bridge = CvBridge() + self.P = None + self.header = None + self.register_sub_pub() + self.prediction_img = None + + def initialize_semantics(self): + if self.param.semantic_segmentation: + self.semantic_model = resolve_model(self.param.segmentation_model, self.param) + + if self.param.feature_extractor: + self.feature_extractor = resolve_model(self.param.feature_config.name, self.param.feature_config) + + def register_sub_pub(self): + """Register publishers and subscribers.""" + + node_name = rospy.get_name() + # subscribers + if self.param.camera_info_topic is not None and self.param.resize is not None: + rospy.Subscriber(self.param.camera_info_topic, CameraInfo, self.image_info_callback) + self.feat_im_info_pub = rospy.Publisher( + node_name + "/" + self.param.camera_info_topic + "_resized", CameraInfo, queue_size=2 + ) + + if "compressed" in self.param.image_topic: + self.compressed = True + self.subscriber = rospy.Subscriber( + self.param.image_topic, CompressedImage, self.image_callback, queue_size=2 + ) + else: + self.compressed = False + rospy.Subscriber(self.param.image_topic, Image, self.image_callback) + + # publishers + if self.param.semantic_segmentation: + self.seg_pub = rospy.Publisher(node_name + "/" + self.param.publish_topic, Image, queue_size=2) + self.seg_im_pub = rospy.Publisher(node_name + "/" + self.param.publish_image_topic, Image, queue_size=2) + self.semseg_color_map = self.color_map(len(self.param.channels)) + if self.param.show_label_legend: + self.color_map_viz() + if self.param.feature_extractor: + self.feature_pub = rospy.Publisher(node_name + "/" + self.param.feature_topic, Image, queue_size=2) + self.feat_im_pub = rospy.Publisher(node_name + "/" + self.param.feat_image_topic, Image, queue_size=2) + self.feat_channel_info_pub = rospy.Publisher( + node_name + "/" + self.param.feat_channel_info_topic, ChannelInfo, queue_size=2 + ) + + self.channel_info_pub = rospy.Publisher( + node_name + "/" + self.param.channel_info_topic, ChannelInfo, queue_size=2 + ) + + def color_map(self, N=256, normalized=False): + """Create a color map for the class labels. 
+ + Args: + N (int): + normalized (bool): + """ + + def bitget(byteval, idx): + return (byteval & (1 << idx)) != 0 + + dtype = "float32" if normalized else "uint8" + cmap = np.zeros((N + 1, 3), dtype=dtype) + for i in range(N + 1): + r = g = b = 0 + c = i + for j in range(8): + r = r | (bitget(c, 0) << 7 - j) + g = g | (bitget(c, 1) << 7 - j) + b = b | (bitget(c, 2) << 7 - j) + c = c >> 3 + + cmap[i] = np.array([r, g, b]) + cmap[1] = np.array([81, 113, 162]) + cmap[2] = np.array([81, 113, 162]) + cmap[3] = np.array([188, 63, 59]) + cmap = cmap / 255 if normalized else cmap + return cmap[1:] + + def color_map_viz(self): + """Display the color map of the classes.""" + nclasses = len(self.param.channels) + row_size = 50 + col_size = 500 + if self.param.semantic_segmentation: + cmap = self.semseg_color_map + array = np.empty((row_size * (nclasses), col_size, cmap.shape[1]), dtype=cmap.dtype) + for i in range(nclasses): + array[i * row_size : i * row_size + row_size, :] = cmap[i] + imshow(array) + plt.yticks([row_size * i + row_size / 2 for i in range(nclasses)], self.param.channels) + plt.xticks([]) + plt.show() + + def image_info_callback(self, msg): + """Callback for camera info. + + Args: + msg (CameraInfo): + """ + self.P = np.array(msg.P).reshape(3, 4) + self.height = int(self.param.resize * msg.height) + self.width = int(self.param.resize * msg.width) + self.info = msg + self.info.height = self.height + self.info.width = self.width + self.P = np.array(msg.P).reshape(3, 4) + self.P[:2, :3] = self.P[:2, :3] * self.param.resize + self.info.K = self.P[:3, :3].flatten().tolist() + self.info.P = self.P.flatten().tolist() + + def image_callback(self, rgb_msg): + if self.P is None: + return + if self.compressed: + image = self.cv_bridge.compressed_imgmsg_to_cv2(rgb_msg) + if self.param.resize is not None: + image = cv2.resize(image, dsize=(self.width, self.height)) + image = cp.asarray(image) + else: + image = self.cv_bridge.imgmsg_to_cv2(rgb_msg, desired_encoding="rgb8") + if self.param.resize is not None: + image = cv2.resize(image, dsize=(self.width, self.height)) + image = cp.asarray(image) + self.header = rgb_msg.header + self.process_image(image) + + if self.param.semantic_segmentation: + self.publish_segmentation() + self.publish_segmentation_image() + self.publish_channel_info([f"sem_{c}" for c in self.param.channels], self.channel_info_pub) + if self.param.feature_extractor: + self.publish_feature() + self.publish_feature_image(self.features) + self.publish_channel_info([f"feat_{i}" for i in range(self.features.shape[0])], self.feat_channel_info_pub) + if self.param.resize is not None: + self.pub_info() + + def pub_info(self): + self.feat_im_info_pub.publish(self.info) + + def publish_channel_info(self, channels, pub): + """Publish fusion info.""" + info = ChannelInfo() + info.header = self.header + info.channels = channels + pub.publish(info) + + def process_image(self, image): + """Depending on setting generate color, semantic segmentation or feature channels. 
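
        A rough sketch of the produced attributes (indicative only; shapes
        assume the default segmentation model and DINO feature extractor):

            # self.sem_seg  : cupy array, (len(param.channels), H, W) class scores
            # self.features : torch tensor, (feature dim, H, W) dense features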
+
+        Args:
+            image: input RGB image (cupy array).
+        """
+        if self.param.semantic_segmentation:
+            self.sem_seg = self.semantic_model["model"](image)
+
+        if self.param.feature_extractor:
+            self.features = self.feature_extractor["model"](image)
+
+    def publish_segmentation(self):
+        probabilities = self.sem_seg
+        img = probabilities.get()
+        img = np.transpose(img, (1, 2, 0)).astype(np.float32)
+        seg_msg = self.cv_bridge.cv2_to_imgmsg(img, encoding="passthrough")
+        seg_msg.header.frame_id = self.header.frame_id
+        seg_msg.header.stamp = self.header.stamp
+        self.seg_pub.publish(seg_msg)
+
+    def publish_feature(self):
+        features = self.features
+        img = features.cpu().detach().numpy()
+        img = np.transpose(img, (1, 2, 0)).astype(np.float32)
+        feature_msg = self.cv_bridge.cv2_to_imgmsg(img, encoding="passthrough")
+        feature_msg.header.frame_id = self.header.frame_id
+        feature_msg.header.stamp = self.header.stamp
+        self.feature_pub.publish(feature_msg)
+
+    def publish_segmentation_image(self):
+        colors = None
+        probabilities = self.sem_seg
+        if self.param.semantic_segmentation:
+            colors = cp.asarray(self.semseg_color_map)
+        assert colors.ndim == 2 and colors.shape[1] == 3
+
+        # prob = cp.zeros((len(self.param.channels),) + probabilities.shape[1:])
+        # if "class_max" in self.param.fusion:
+        #     # decode, create an array with all possible classes and insert probabilities
+        #     it = 0
+        #     for iit, (chan, fuse) in enumerate(zip(self.param.channels, self.param.fusion)):
+        #         if fuse in ["class_max"]:
+        #             temp = probabilities[it]
+        #             temp_p, temp_i = decode_max(temp)
+        #             temp_i.choose(prob)
+        #             c = cp.mgrid[0 : temp_i.shape[0], 0 : temp_i.shape[1]]
+        #             prob[temp_i, c[0], c[1]] = temp_p
+        #             it += 1
+        #         elif fuse in ["class_bayesian", "class_average"]:
+        #             # assign fixed probability to correct index
+        #             if chan in self.semantic_model["model"].segmentation_channels:
+        #                 prob[self.semantic_model["model"].segmentation_channels[chan]] = probabilities[it]
+        #                 it += 1
+        #     img = cp.argmax(prob, axis=0)
+        #
+        # else:
+        img = cp.argmax(probabilities, axis=0)
+        img = colors[img].astype(cp.uint8)  # H x W x 3
+        img = img.get()
+        seg_msg = self.cv_bridge.cv2_to_imgmsg(img, encoding="rgb8")
+        seg_msg.header.frame_id = self.header.frame_id
+        seg_msg.header.stamp = self.header.stamp
+        self.seg_im_pub.publish(seg_msg)
+
+    def publish_feature_image(self, features):
+        data = np.reshape(features.cpu().detach().numpy(), (features.shape[0], -1)).T
+        n_components = 3
+        pca = PCA(n_components=n_components).fit(data)
+        pca_descriptors = pca.transform(data)
+        img_pca = pca_descriptors.reshape(features.shape[1], features.shape[2], n_components)
+        comp = img_pca  # [:, :, -3:]
+        comp_min = comp.min(axis=(0, 1))
+        comp_max = comp.max(axis=(0, 1))
+        comp_img = (comp - comp_min) / (comp_max - comp_min)
+        comp_img = (comp_img * 255).astype(np.uint8)
+        feat_msg = self.cv_bridge.cv2_to_imgmsg(comp_img, encoding="passthrough")
+        feat_msg.header.frame_id = self.header.frame_id
+        self.feat_im_pub.publish(feat_msg)
+
+def main():
+    arg = sys.argv[1]
+    sensor_name = arg
+    rospy.init_node("semantic_segmentation_node", anonymous=True, log_level=rospy.INFO)
+    node = SemanticSegmentationNode(sensor_name)
+    rospy.spin()
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/sensor_processing/semantic_sensor/semantic_sensor/image_parameters.py b/sensor_processing/semantic_sensor/semantic_sensor/image_parameters.py
new file mode 100644
index 00000000..6ad67027
--- /dev/null
+++
b/sensor_processing/semantic_sensor/semantic_sensor/image_parameters.py @@ -0,0 +1,42 @@ +from dataclasses import dataclass, field +from simple_parsing.helpers import Serializable + + +@dataclass +class FeatureExtractorParameter(Serializable): + name: str = "DINO" + interpolation: str = "bilinear" + model: str = "vit_small" + patch_size: int = 16 + dim: int = 10 + dropout: bool = False + dino_feat_type: str = "feat" + projection_type: str = "nonlinear" + input_size: list = field(default_factory=lambda: [80, 160]) + pcl: bool = False + + +@dataclass +class ImageParameter(Serializable): + image_topic: str = "/alphasense_driver_ros/cam4/debayered" + + semantic_segmentation: bool = True + # sem_seg_topic: str = "semantic_seg" + # sem_seg_image_topic: str = "semantic_seg_im" + # publish_camera_info_topic: str = "semantic_seg/camera_info" + segmentation_model: str = "detectron_coco_panoptic_fpn_R_101_3x" + show_label_legend: bool = False + channels: list = field(default_factory=lambda: ["grass", "road", "tree", "sky"]) + + publish_topic: str = "semantic_seg" + publish_image_topic: str = "semantic_seg_img" + channel_info_topic: str = "channel_info" + + feature_extractor: bool = False + feature_config: FeatureExtractorParameter = FeatureExtractorParameter + # feature_config.input_size: list = field(default_factory=lambda: [80, 160]) + feature_topic: str = "semantic_seg_feat" + feat_image_topic: str = "semantic_seg_feat_im" + feat_channel_info_topic: str = "feat_channel_info" + resize: float = None + camera_info_topic: str = "camera_info" diff --git a/sensor_processing/semantic_sensor/semantic_sensor/networks.py b/sensor_processing/semantic_sensor/semantic_sensor/networks.py new file mode 100644 index 00000000..e64b4497 --- /dev/null +++ b/sensor_processing/semantic_sensor/semantic_sensor/networks.py @@ -0,0 +1,291 @@ +from torchvision.models.segmentation import fcn_resnet50, FCN_ResNet50_Weights +from torchvision.models.segmentation import ( + lraspp_mobilenet_v3_large, + LRASPP_MobileNet_V3_Large_Weights, +) +from torchvision.models.segmentation import deeplabv3_mobilenet_v3_large, DeepLabV3_MobileNet_V3_Large_Weights +import torch +import torchvision.transforms.functional as TF +from torchvision.transforms import Resize +import torch.nn.functional as NF +import numpy as np +import cupy as cp + +from detectron2.utils.logger import setup_logger + +setup_logger() + +from detectron2 import model_zoo +from detectron2.engine import DefaultPredictor +from detectron2.config import get_cfg +from detectron2.data import MetadataCatalog + +from semantic_sensor.DINO.modules import DinoFeaturizer + +from semantic_sensor.pointcloud_parameters import ( + PointcloudParameter, + FeatureExtractorParameter, +) +from semantic_sensor.utils import encode_max + + +def resolve_model(name, config=None): + """Get the model class based on the name of the pretrained model. 
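
    Example (a minimal sketch; `config` is an ImageParameter- or
    PointcloudParameter-style object whose `channels` name classes the chosen
    model knows):

        entry = resolve_model("lraspp_mobilenet_v3_large", config)
        scores = entry["model"](image)  # cupy array, one score plane per channel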
+
+    Args:
+        name (str): Name of pretrained model
+            [fcn_resnet50, lraspp_mobilenet_v3_large, detectron_coco_panoptic_fpn_R_101_3x]
+
+    Returns:
+        Dict[str, Any]: the resolved model name and the wrapped model instance.
+    """
+    if name == "fcn_resnet50":
+        weights = FCN_ResNet50_Weights.DEFAULT
+        net = fcn_resnet50
+        model = PytorchModel(net, weights, config)
+        return {
+            "name": "fcn_resnet50",
+            "model": model,
+        }
+    elif name == "lraspp_mobilenet_v3_large":
+        weights = LRASPP_MobileNet_V3_Large_Weights.DEFAULT
+        net = lraspp_mobilenet_v3_large
+        model = PytorchModel(net, weights, config)
+        return {
+            "name": "lraspp_mobilenet_v3_large",
+            "model": model,
+        }
+    elif name == "deeplabv3_mobilenet_v3_large":
+        weights = DeepLabV3_MobileNet_V3_Large_Weights.COCO_WITH_VOC_LABELS_V1
+        net = deeplabv3_mobilenet_v3_large
+        model = PytorchModel(net, weights, config)
+        return {
+            "name": "deeplabv3_mobilenet_v3_large",
+            "model": model,
+        }
+    elif name == "detectron_coco_panoptic_fpn_R_101_3x":
+        # "Cityscapes/mask_rcnn_R_50_FPN.yaml"
+        # "Misc/semantic_R_50_FPN_1x.yaml"
+        # "Cityscapes-SemanticSegmentation/deeplab_v3_plus_R_103_os16_mg124_poly_90k_bs16.yaml"
+        # "COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml"
+        weights = "COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml"
+        model = DetectronModel(weights, config)
+        return {
+            "name": "detectron_coco_panoptic_fpn_R_101_3x",
+            "model": model,
+        }
+    elif name == "DINO":
+        weights = config.model + str(config.patch_size)
+        model = STEGOModel(weights, config)
+        return {
+            "name": config.model + str(config.patch_size),
+            "model": model,
+        }
+    else:
+        raise NotImplementedError
+
+
+class PytorchModel:
+    """Wrapper around the torchvision segmentation models.
+
+    segmentation_channels: contains only the classes known to the model.
+    actual_channels: list of all channels of the output.
+    """
+
+    def __init__(self, net, weights, param):
+        self.model = net(weights=weights)
+        self.weights = weights
+        self.param = param
+        self.model.eval()
+        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+        self.model.to(device=device)
+        self.resolve_categories()
+
+    def resolve_categories(self):
+        """Create a segmentation_channels dict containing the actual values that are processed."""
+        # get all classes
+        class_to_idx = {cls: idx for (idx, cls) in enumerate(self.get_classes())}
+        print(
+            "Semantic Segmentation possible channels: ", self.get_classes(),
+        )
+        indices = []
+        channels = []
+        self.actual_channels = []
+        # check correspondence between semseg model classes and parameter classes, class_max
+        for it, chan in enumerate(self.param.channels):
+            if chan in [cls for cls in list(class_to_idx.keys())]:
+                indices.append(class_to_idx[chan])
+                channels.append(chan)
+                self.actual_channels.append(chan)
+            else:
+                self.actual_channels.append(chan)
+            # elif self.param.fusion_methods[it] in ["class_average", "class_bayesian"]:
+            #     print(chan, " is not in the semantic segmentation model.")
+        # for it, chan in enumerate(self.param.channels):
+        #     self.actual_channels.append(chan)
+        #     if self.param.fusion_methods[it] in ["class_max"]:
+        #         self.actual_channels.append(chan)
+        #         print(
+        #             chan,
+        #             " is not in the semantic segmentation model but is a max channel.",
+        #         )
+        #     else:
+        #         pass
+        self.stuff_categories = dict(zip(channels, indices))
+        self.segmentation_channels = dict(zip(channels, indices))
+
+    def __call__(self, image, *args, **kwargs):
+        """Feedforward image through the model and then create the channels.
+
+        Args:
+            image (cupy._core.core.ndarray):
+            *args ():
+            **kwargs ():
+
+        Returns:
+            cupy.ndarray: one score plane per configured channel.
+        """
+        batch = torch.as_tensor(image, device="cuda").permute(2, 0, 1).unsqueeze(0)
+        batch =
TF.convert_image_dtype(batch, torch.float32) + with torch.no_grad(): + prediction = self.model(batch)["out"] + normalized_masks = torch.squeeze(prediction.softmax(dim=1), dim=0) + # get masks of fix classes + selected_masks = cp.asarray(normalized_masks[list(self.stuff_categories.values())]) + # get values of max, first remove the ones we already have + normalized_masks[list(self.stuff_categories.values())] = 0 + # for i in range(self.param.fusion_methods.count("class_max")): + # maxim, index = torch.max(normalized_masks, dim=0) + # mer = encode_max(maxim, index) + # selected_masks = cp.concatenate((selected_masks, cp.expand_dims(mer, axis=0)), axis=0) + # x = torch.arange(0, index.shape[0]) + # y = torch.arange(0, index.shape[1]) + # c = torch.meshgrid(x, y, indexing="ij") + # normalized_masks[index, c[0], c[1]] = 0 + assert len(self.actual_channels) == selected_masks.shape[0] + return cp.asarray(selected_masks) + + def get_classes(self): + """Get list of strings containing all the classes. + + Returns: + List[str]: List of classes + """ + return self.weights.meta["categories"] + + +class DetectronModel: + def __init__(self, weights, param): + self.cfg = get_cfg() + self.param = param + + # add project-specific config (e.g., TensorMask) here if you're not running a model in detectron2's core library + self.cfg.merge_from_file(model_zoo.get_config_file(weights)) + self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this model + # Find a model from detectron2's model zoo. You can use the https://dl.fbaipublicfiles... url as well + self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(weights) + self.predictor = DefaultPredictor(self.cfg) + self.metadata = MetadataCatalog.get(self.cfg.DATASETS.TRAIN[0]) + self.stuff_categories, self.is_stuff = self.resolve_categories("stuff_classes") + self.thing_categories, self.is_thing = self.resolve_categories("thing_classes") + self.segmentation_channels = {} + for chan in self.param.channels: + if chan in self.stuff_categories.keys(): + self.segmentation_channels[chan] = self.stuff_categories[chan] + elif chan in self.thing_categories.keys(): + self.segmentation_channels[chan] = self.thing_categories[chan] + else: + # remove it + pass + self.actual_channels = self.segmentation_channels.keys() + + def resolve_categories(self, name): + classes = self.get_category(name) + class_to_idx = {cls: idx for (idx, cls) in enumerate(classes)} + print( + "Semantic Segmentation possible channels: ", classes, + ) + indices = [] + channels = [] + is_thing = [] + for it, channel in enumerate(self.param.channels): + if channel in [cls for cls in list(class_to_idx.keys())]: + indices.append(class_to_idx[channel]) + channels.append(channel) + is_thing.append(True) + elif self.param.fusion_methods[it] in ["class_average", "class_bayesian"]: + is_thing.append(False) + print(channel, " is not in the semantic segmentation model.") + categories = dict(zip(channels, indices)) + category_isthing = dict(zip(self.param.channels, is_thing)) + return categories, category_isthing + + def __call__(self, image, *args, **kwargs): + # TODO: there are some instruction on how to change input type + image = cp.flip(image, axis=2) + prediction = self.predictor(image.get()) + probabilities = cp.asarray(torch.softmax(prediction["sem_seg"], dim=0)) + output = cp.zeros((len(self.segmentation_channels), probabilities.shape[1], probabilities.shape[2],)) + # add semseg + output[cp.array(list(self.is_stuff.values()))] = probabilities[list(self.stuff_categories.values())] + # add 
instances + indices, instance_info = prediction["panoptic_seg"] + # TODO dont know why i need temp, look into how to avoid + temp = output[cp.array(list(self.is_thing.values()))] + for i, instance in enumerate(instance_info): + if instance is None or not instance["isthing"]: + continue + mask = cp.asarray((indices == instance["id"]).int()) + if instance["instance_id"] in self.thing_categories.values(): + temp[i] = mask * instance["score"] + output[cp.array(list(self.is_thing.values()))] = temp + return output + + def get_category(self, name): + return self.metadata.get(name) + + def get_classes(self): + return self.get_category("thing_classes") + self.get_category("stuff_classes") + + +class STEGOModel: + def __init__(self, weights, cfg): + self.cfg: FeatureExtractorParameter = cfg + self.model = DinoFeaturizer(weights, cfg=self.cfg) + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + self.model.to(self.device) + self.model.eval() + self.shrink = Resize(size=(self.cfg.input_size[0], self.cfg.input_size[1]), antialias=True) + + def to_tensor(self, data): + data = data.astype(np.float32) + if len(data.shape) == 3: # transpose image-like data + data = data.transpose(2, 0, 1) + elif len(data.shape) == 2: + data = data.reshape((1,) + data.shape) + if len(data.shape) == 3 and data.shape[0] == 3: # normalization of rgb images + data = data / 255.0 + tens = torch.as_tensor(data, device="cuda") + return tens + + def __call__(self, image, *args, **kwargs): + # image = torch.as_tensor(image, device="cuda").permute(2, 0, 1).unsqueeze(0) + image = self.to_tensor(image).unsqueeze(0) + # if self.cfg.pcl: + reset_size = Resize(image.shape[-2:], interpolation=TF.InterpolationMode.NEAREST, antialias=True) + im_size = image.shape[-2:] + image = self.shrink(image) + image = TF.normalize(image, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)) + + feat1, code1 = self.model(image) + feat2, code2 = self.model(image.flip(dims=[3])) + + code = (code1 + code2.flip(dims=[3])) / 2 + # code = NF.interpolate(code, image.shape[-2:], mode=self.cfg.interpolation, align_corners=False).detach() + # if we just use first ten + # code = (feat1[:,:10] + feat2[:,:10].flip(dims=[3])) / 2 + + # if self.cfg.pcl: + code = NF.interpolate(code, im_size, mode=self.cfg.interpolation, align_corners=False).detach() + code = torch.squeeze(code, dim=0) + return code diff --git a/sensor_processing/semantic_sensor/semantic_sensor/pointcloud_node.py b/sensor_processing/semantic_sensor/semantic_sensor/pointcloud_node.py new file mode 100755 index 00000000..799527d4 --- /dev/null +++ b/sensor_processing/semantic_sensor/semantic_sensor/pointcloud_node.py @@ -0,0 +1,371 @@ +import rospy +import sys + +import numpy as np +import cupy as cp + +np.float = np.float64 # temp fix for following import suggested at https://github.com/eric-wieser/ros_numpy/issues/37 +import ros_numpy +import matplotlib.pyplot as plt +from skimage.io import imshow + +import message_filters +from sensor_msgs.msg import Image, CameraInfo +from sensor_msgs.msg import PointCloud2, Image +from cv_bridge import CvBridge + +from semantic_sensor.pointcloud_parameters import PointcloudParameter +from semantic_sensor.networks import resolve_model +from semantic_sensor.utils import decode_max +from sklearn.decomposition import PCA + + +class PointcloudNode: + def __init__(self, sensor_name): + """Get parameter from server, initialize variables and semantics, register publishers and subscribers. 
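
        Example (a minimal sketch; expects a `front_cam` block under
        `<node_name>/subscribers` on the parameter server, matching
        sensor_parameter.yaml and the launch files above):

            rospy.init_node("semantic_pointcloud")
            node = PointcloudNode("front_cam")
            rospy.spin()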
+ + Args: + sensor_name (str): Name of the sensor in the ros param server. + """ + # TODO: if this is going to be loaded from another package we might need to change namespace + self.param: PointcloudParameter = PointcloudParameter() + self.param.feature_config.input_size = [80, 160] + namesp = rospy.get_name() + if rospy.has_param(namesp + "/subscribers"): + config = rospy.get_param(namesp + "/subscribers") + self.param: PointcloudParameter = PointcloudParameter.from_dict(config[sensor_name]) + else: + print("NO ROS ENV found.") + + self.param.sensor_name = sensor_name + print("--------------Pointcloud Parameters-------------------") + print(self.param.dumps_yaml()) + print("--------------End of Parameters-----------------------") + self.semseg_color_map = None + # setup custom dtype + self.create_custom_dtype() + # setup semantics + self.feature_extractor = None + self.semantic_model = None + self.segmentation_channels = None + self.feature_channels = None + self.initialize_semantics() + + # setup pointcloud creation + self.cv_bridge = CvBridge() + self.P = None + self.header = None + self.register_sub_pub() + self.prediction_img = None + self.feat_img = None + + def initialize_semantics(self): + """Resolve the feature and segmentation mode and create segmentation_channel and feature_channels. + + - segmentation_channels: is a dictionary that contains the segmentation channel names as key and the fusion algorithm as value. + - feature_channels: is a dictionary that contains the features channel names as key and the fusion algorithm as value. + """ + if self.param.semantic_segmentation: + self.semantic_model = resolve_model(self.param.segmentation_model, self.param) + self.segmentation_channels = {} + for i, (chan, fusion) in enumerate(zip(self.param.channels, self.param.fusion)): + if fusion in ["class_bayesian", "class_average", "class_max"]: + self.segmentation_channels[chan] = fusion + assert len(self.segmentation_channels.keys()) > 0 + if self.param.feature_extractor: + self.feature_extractor = resolve_model(self.param.feature_config.name, self.param.feature_config) + self.feature_channels = {} + for i, (chan, fusion) in enumerate(zip(self.param.channels, self.param.fusion)): + if fusion in ["average"]: + self.feature_channels[chan] = fusion + assert len(self.feature_channels.keys()) > 0 + + def register_sub_pub(self): + """Register publishers and subscribers.""" + # subscribers + rospy.Subscriber(self.param.cam_info_topic, CameraInfo, self.cam_info_callback) + rgb_sub = message_filters.Subscriber(self.param.image_topic, Image) + depth_sub = message_filters.Subscriber(self.param.depth_topic, Image) + if self.param.confidence: + confidence_sub = message_filters.Subscriber(self.param.confidence_topic, Image) + ts = message_filters.ApproximateTimeSynchronizer( + [depth_sub, rgb_sub, confidence_sub,], queue_size=10, slop=0.5, + ) + else: + ts = message_filters.ApproximateTimeSynchronizer([depth_sub, rgb_sub,], queue_size=10, slop=0.5,) + ts.registerCallback(self.image_callback) + + self.pcl_pub = rospy.Publisher(self.param.topic_name, PointCloud2, queue_size=2) + # publishers + if self.param.semantic_segmentation: + if self.param.publish_segmentation_image: + self.seg_pub = rospy.Publisher(self.param.segmentation_image_topic, Image, queue_size=2) + if "class_max" in self.param.fusion: + self.labels = self.semantic_model["model"].get_classes() + else: + self.labels = list(self.segmentation_channels.keys()) + self.semseg_color_map = self.color_map(len(self.labels)) + if 
+            if self.param.show_label_legend:
+                self.color_map_viz()
+        if self.param.feature_extractor:
+            self.feat_pub = rospy.Publisher(self.param.feature_config.feature_image_topic, Image, queue_size=2)
+
+    def color_map(self, N=256, normalized=False):
+        """Create a color map for the class labels.
+
+        Args:
+            N (int): number of classes.
+            normalized (bool): if True, return float colors in [0, 1] instead of uint8.
+        """
+
+        def bitget(byteval, idx):
+            return (byteval & (1 << idx)) != 0
+
+        dtype = "float32" if normalized else "uint8"
+        cmap = np.zeros((N + 1, 3), dtype=dtype)
+        for i in range(N + 1):
+            r = g = b = 0
+            c = i
+            for j in range(8):
+                r = r | (bitget(c, 0) << 7 - j)
+                g = g | (bitget(c, 1) << 7 - j)
+                b = b | (bitget(c, 2) << 7 - j)
+                c = c >> 3
+            cmap[i] = np.array([r, g, b])
+        cmap[1] = np.array([188, 63, 59])
+        cmap[2] = np.array([81, 113, 162])
+        cmap[3] = np.array([136, 49, 132])
+        cmap = cmap / 255 if normalized else cmap
+        return cmap[1:]
+
+    def color_map_viz(self):
+        """Display the color map of the classes."""
+        nclasses = len(self.labels)
+        row_size = 50
+        col_size = 500
+        if self.param.semantic_segmentation:
+            cmap = self.semseg_color_map
+            array = np.empty((row_size * nclasses, col_size, cmap.shape[1]), dtype=cmap.dtype)
+            for i in range(nclasses):
+                array[i * row_size : i * row_size + row_size, :] = cmap[i]
+            imshow(array)
+            plt.yticks([row_size * i + row_size / 2 for i in range(nclasses)], self.labels)
+            plt.xticks([])
+            plt.show()
+
+    def create_custom_dtype(self):
+        """Generate a new dtype according to the channels in the params.
+
+        Some channels might remain empty.
+        """
+        self.dtype = [
+            ("x", np.float32),
+            ("y", np.float32),
+            ("z", np.float32),
+        ]
+        for chan, fus in zip(self.param.channels, self.param.fusion):
+            self.dtype.append((chan, np.float32))
+        print(self.dtype)
+
+    def cam_info_callback(self, msg):
+        """Subscribe to the camera info to get the projection matrix and header.
+
+        Args:
+            msg: sensor_msgs/CameraInfo message.
+        """
+        a = cp.asarray(msg.P)
+        self.P = cp.resize(a, (3, 4))
+        self.height = msg.height
+        self.width = msg.width
+        self.header = msg.header
+
+    def image_callback(self, depth_msg, rgb_msg=None, confidence_msg=None):
+        """Synchronize the depth, rgb and optional confidence images, then build and publish the pointcloud."""
+        confidence = None
+        image = None
+        if self.P is None:
+            return
+        if rgb_msg is not None:
+            image = cp.asarray(self.cv_bridge.imgmsg_to_cv2(rgb_msg, desired_encoding="rgb8"))
+        depth = cp.asarray(self.cv_bridge.imgmsg_to_cv2(depth_msg, desired_encoding="passthrough"))
+        if confidence_msg is not None:
+            confidence = cp.asarray(self.cv_bridge.imgmsg_to_cv2(confidence_msg, desired_encoding="passthrough"))
+
+        pcl = self.create_pcl_from_image(image, depth, confidence)
+        self.publish_pointcloud(pcl, depth_msg.header)
+
+        if self.param.publish_segmentation_image:
+            self.publish_segmentation_image(self.prediction_img)
+        if self.param.publish_feature_image and self.param.feature_extractor:
+            self.publish_feature_image(self.feat_img)
+
+    def create_pcl_from_image(self, image, depth, confidence):
+        """Generate the pointcloud from the depth map and process the image.
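+
+        Valid pixels (u, v) with depth d are back-projected through the pinhole model using the
+        projection matrix P: x = (u - P[0, 2]) * d / P[0, 0], y = (v - P[1, 2]) * d / P[1, 1], z = d.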
+
+        Args:
+            image: rgb image (cupy array) or None.
+            depth: depth map aligned with the image.
+            confidence: optional confidence map.
+
+        Returns:
+            Structured pointcloud array with x, y, z and the configured channels.
+        """
+        u, v = self.get_coordinates(depth, confidence)
+
+        # create pointcloud
+        world_x = (u.astype(np.float32) - self.P[0, 2]) * depth[v, u] / self.P[0, 0]
+        world_y = (v.astype(np.float32) - self.P[1, 2]) * depth[v, u] / self.P[1, 1]
+        world_z = depth[v, u]
+        points = np.zeros(world_x.shape, dtype=self.dtype)
+        points["x"] = cp.asnumpy(world_x)
+        points["y"] = cp.asnumpy(world_y)
+        points["z"] = cp.asnumpy(world_z)
+        self.process_image(image, u, v, points)
+        return points
+
+    def get_coordinates(self, depth, confidence):
+        """Define which pixels are valid for generating the pointcloud.
+
+        Args:
+            depth: depth map.
+            confidence: optional confidence map.
+
+        Returns:
+            Pixel coordinates u, v of the valid depth readings.
+        """
+        pos = cp.where(depth > 0, 1, 0)
+        low = cp.where(depth < 8, 1, 0)
+        if confidence is not None:
+            conf = cp.where(confidence >= self.param.confidence_threshold, 1, 0)
+        else:
+            conf = cp.ones(pos.shape)
+        fin = cp.isfinite(depth)
+        # A pixel is valid only if all four masks are 1: rint(sum - 2.6) rounds to 1 for a
+        # sum of 4 and to 0 for a sum of 3 or less, i.e. a logical AND of the masks.
+        temp = cp.maximum(cp.rint(fin + pos + conf + low - 2.6), 0)
+        mask = cp.nonzero(temp)
+        u = mask[1]
+        v = mask[0]
+        return u, v
+
+    def process_image(self, image, u, v, points):
+        """Depending on the settings, generate the color, semantic segmentation or feature channels.
+
+        Args:
+            image: rgb image.
+            u: valid pixel columns.
+            v: valid pixel rows.
+            points: structured pointcloud array to fill.
+        """
+        if "color" in self.param.fusion:
+            valid_rgb = image[v, u].get()
+            r = np.asarray(valid_rgb[:, 0], dtype=np.uint32)
+            g = np.asarray(valid_rgb[:, 1], dtype=np.uint32)
+            b = np.asarray(valid_rgb[:, 2], dtype=np.uint32)
+            # pack r, g, b into the 24 lowest bits of a uint32 and reinterpret it as float32
+            rgb_arr = np.array((r << 16) | (g << 8) | (b << 0), dtype=np.uint32)
+            rgb_arr.dtype = np.float32
+            position = self.param.fusion.index("color")
+            points[self.param.channels[position]] = rgb_arr
+
+        if self.segmentation_channels is not None:
+            self.perform_segmentation(image, points, u, v)
+        if self.feature_channels is not None:
+            self.extract_features(image, points, u, v)
+
+    def perform_segmentation(self, image, points, u, v):
+        """Feed the image through the semantic segmentation network, append the pixels to the pcl and save the prediction for publication.
+
+        Args:
+            image: rgb image.
+            points: structured pointcloud array to fill.
+            u: valid pixel columns.
+            v: valid pixel rows.
+        """
+        prediction = self.semantic_model["model"](image)
+        values = prediction[:, v.get(), u.get()].get()
+        for it, channel in enumerate(self.semantic_model["model"].actual_channels):
+            points[channel] = values[it]
+        if self.param.publish_segmentation_image and self.param.semantic_segmentation:
+            self.prediction_img = prediction
+
+    def extract_features(self, image, points, u, v):
+        """Feed the image through the feature extraction network and append the pixels to the pcl.
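+
+        The dense feature map is sampled at the valid pixel coordinates (u, v) and each feature
+        dimension is written into its corresponding channel of the structured pointcloud array.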
+
+        Args:
+            image: rgb image.
+            points: structured pointcloud array to fill.
+            u: valid pixel columns.
+            v: valid pixel rows.
+        """
+        prediction = self.feature_extractor["model"](image)
+        values = prediction[:, v.get(), u.get()].cpu().detach().numpy()
+        for it, channel in enumerate(self.feature_channels.keys()):
+            points[channel] = values[it]
+        if self.param.publish_feature_image and self.param.feature_extractor:
+            self.feat_img = prediction
+
+    def publish_segmentation_image(self, probabilities):
+        if self.param.semantic_segmentation:
+            colors = cp.asarray(self.semseg_color_map)
+            assert colors.ndim == 2 and colors.shape[1] == 3
+            if self.P is None:
+                return
+            prob = cp.zeros((len(self.labels),) + probabilities.shape[1:])
+            if "class_max" in self.param.fusion:
+                # decode, create an array with all possible classes and insert the probabilities
+                it = 0
+                for iit, (chan, fuse) in enumerate(zip(self.param.channels, self.param.fusion)):
+                    if fuse in ["class_max"]:
+                        temp = probabilities[it]
+                        temp_p, temp_i = decode_max(temp)
+                        c = cp.mgrid[0 : temp_i.shape[0], 0 : temp_i.shape[1]]
+                        prob[temp_i, c[0], c[1]] = temp_p
+                        it += 1
+                    elif fuse in ["class_bayesian", "class_average"]:
+                        # assign the probability to the fixed class index of this channel
+                        if chan in self.semantic_model["model"].segmentation_channels:
+                            prob[self.semantic_model["model"].segmentation_channels[chan]] = probabilities[it]
+                            it += 1
+                img = cp.argmax(prob, axis=0)
+            else:
+                img = cp.argmax(probabilities, axis=0)
+            img = colors[img].astype(cp.uint8)  # H x W x 3
+            img = img.get()
+            seg_msg = self.cv_bridge.cv2_to_imgmsg(img, encoding="rgb8")
+            seg_msg.header.frame_id = self.header.frame_id
+            self.seg_pub.publish(seg_msg)
+
+    def publish_feature_image(self, features):
+        """Reduce the feature channels to three PCA components and publish them as an image."""
+        data = np.reshape(features.cpu().detach().numpy(), (features.shape[0], -1)).T
+        n_components = 3
+        pca = PCA(n_components=n_components).fit(data)
+        pca_descriptors = pca.transform(data)
+        img_pca = pca_descriptors.reshape(features.shape[1], features.shape[2], n_components)
+        comp = img_pca
+        comp_min = comp.min(axis=(0, 1))
+        comp_max = comp.max(axis=(0, 1))
+        comp_img = (comp - comp_min) / (comp_max - comp_min)
+        comp_img = (comp_img * 255).astype(np.uint8)
+        feat_msg = self.cv_bridge.cv2_to_imgmsg(comp_img, encoding="passthrough")
+        feat_msg.header.frame_id = self.header.frame_id
+        self.feat_pub.publish(feat_msg)
+
+    def publish_pointcloud(self, pcl, header):
+        pc2 = ros_numpy.msgify(PointCloud2, pcl)
+        pc2.header = header
+        # pc2.header.frame_id = self.param.cam_frame
+        self.pcl_pub.publish(pc2)
+
+
+def main():
+    sensor_name = sys.argv[1]
+    rospy.init_node("semantic_pointcloud_node", anonymous=True, log_level=rospy.INFO)
+    node = PointcloudNode(sensor_name)
+    rospy.spin()
+
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/sensor_processing/semantic_sensor/semantic_sensor/pointcloud_parameters.py b/sensor_processing/semantic_sensor/semantic_sensor/pointcloud_parameters.py
new file mode 100644
index 00000000..a4837c67
--- /dev/null
+++ b/sensor_processing/semantic_sensor/semantic_sensor/pointcloud_parameters.py
@@ -0,0 +1,47 @@
+from dataclasses import dataclass, field
+from simple_parsing.helpers import Serializable
+
+
+@dataclass
+class FeatureExtractorParameter(Serializable):
+    name: str = "DINO"
+    interpolation: str = "bilinear"
+    model: str = "vit_small"
+    patch_size: int = 16
+    dim: int = 5
+    dropout: bool = False
+    dino_feat_type: str = "feat"
+    projection_type: str = "nonlinear"
+    input_size: list = field(default_factory=lambda: [80, 160])
+    feature_image_topic: str = "/semantic_sensor/feature_image"
"/semantic_sensor/feature_image" + pcl: bool = True + + +@dataclass +class PointcloudParameter(Serializable): + sensor_name: str = "camera" + topic_name: str = "/elvation_mapping/pointcloud_semantic" + channels: list = field(default_factory=lambda: ["rgb", "person", "grass", "tree", "max"]) + fusion: list = field( + default_factory=lambda: ["color", "class_average", "class_average", "class_average", "class_max",] + ) + + semantic_segmentation: bool = False + segmentation_model: str = "lraspp_mobilenet_v3_large" + publish_segmentation_image: bool = False + segmentation_image_topic: str = "/semantic_sensor/sem_seg" + pub_all: bool = False + show_label_legend: bool = False + + cam_info_topic: str = "/zed2i/zed_node/depth/camera_info" + image_topic: str = "/zed2i/zed_node/left/image_rect_color" + depth_topic: str = "/zed2i/zed_node/depth/depth_registered" + cam_frame: str = "zed2i_right_camera_optical_frame" + confidence: bool = False + confidence_topic: str = "/zed2i/zed_node/confidence/confidence_map" + confidence_threshold: int = 10 + + feature_extractor: bool = False + publish_feature_image: bool = False + feature_config: FeatureExtractorParameter = FeatureExtractorParameter + feature_config.input_size: list = field(default_factory=lambda: [80, 160]) diff --git a/sensor_processing/semantic_sensor/semantic_sensor/tests/__init__.py b/sensor_processing/semantic_sensor/semantic_sensor/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/sensor_processing/semantic_sensor/semantic_sensor/tests/test_pointcloud.py b/sensor_processing/semantic_sensor/semantic_sensor/tests/test_pointcloud.py new file mode 100644 index 00000000..724b6b51 --- /dev/null +++ b/sensor_processing/semantic_sensor/semantic_sensor/tests/test_pointcloud.py @@ -0,0 +1,64 @@ +import pytest +from ..pointcloud_node import PointcloudNode +import cupy as cp +from ..utils import encode_max + +"""This test file only works if ros is installed and the ros master is running. 
+""" + + +@pytest.fixture() +def pointcloud_ex(cam_name, channels, fusion, semseg, segpub, showlbl): + node = PointcloudNode(cam_name) + node.param.channels = channels + node.param.fusion = fusion + node.param.semantic_segmentation = semseg + node.param.show_label_legend = showlbl + node.param.publish_segmentation_image = segpub + node.__init__(cam_name) + return node + + +@pytest.mark.parametrize( + "cam_name,channels, fusion,semseg", + [ + ("front_cam", ["feat_0", "feat_1"], ["average", "average"], False), + ("front_cam", ["feat_0", "feat_1"], ["class_max", "average"], True), + ("front_cam", ["feat_0", "feat_1"], ["class_bayesian", "average"], True), + ], +) +@pytest.mark.parametrize( + "segpub", [True, False], +) +@pytest.mark.parametrize( + "showlbl", [True, False], +) +def test_initialize(pointcloud_ex): + # todo here we can add more test + # params: semseg, channels, class max, publish seg, showlabellegend + pointcloud_ex.initialize_semantics() + pointcloud_ex.register_sub_pub() + + +# if semseg then segpub and showlbl might be displayed +@pytest.mark.parametrize( + "cam_name,channels, fusion,semseg,segpub,showlbl", + [ + ("front_cam", ["feat_0", "feat_1"], ["average", "average"], False, False, False,), + ("front_cam", ["feat_0", "feat_1"], ["class_max", "average"], True, True, True), + ("front_cam", ["feat_0", "feat_1"], ["class_max", "average"], True, False, False,), + ("front_cam", ["feat_0", "feat_1"], ["class_bayesian", "average"], True, True, True,), + ("front_cam", ["feat_0", "feat_1"], ["class_bayesian", "average"], True, False, False,), + ], +) +def test_pcl_creation(pointcloud_ex, channels, fusion, semseg, segpub, showlbl): + amount = 3 + if "class_max" in pointcloud_ex.param.fusion: + val = cp.random.rand(360, 640, amount, dtype=cp.float32).astype(cp.float16) + ind = cp.random.randint(0, 2, (360, 640, amount), dtype=cp.uint32).astype(cp.float32) + img = encode_max(val, ind) + else: + img = (cp.random.rand(360, 640, amount) * 255).astype(cp.int32) + pointcloud_ex.P = cp.random.rand(3, 4) + depth = cp.random.rand(360, 640) * 8 + pointcloud_ex.create_pcl_from_image(img, depth, None) diff --git a/sensor_processing/semantic_sensor/semantic_sensor/tests/test_utils.py b/sensor_processing/semantic_sensor/semantic_sensor/tests/test_utils.py new file mode 100644 index 00000000..e50ca9e5 --- /dev/null +++ b/sensor_processing/semantic_sensor/semantic_sensor/tests/test_utils.py @@ -0,0 +1,44 @@ +import pytest +from semantic_sensor.networks import resolve_model +import cupy as cp +import torch +from semantic_sensor.pointcloud_parameters import ( + FeatureExtractorParameter, + PointcloudParameter, +) + + +@pytest.mark.parametrize( + "model_name,channels, fusion", + [ + ("fcn_resnet50", [], []), + ("lraspp_mobilenet_v3_large", [], []), + ("detectron_coco_panoptic_fpn_R_101_3x", [], []), + ], +) +def test_semantic_segmentation(model_name, channels, fusion): + param = PointcloudParameter() + param.segmentation_model = model_name + m = resolve_model(model_name, param) + im_sz = [360, 640] + image = cp.random.random((im_sz[0], im_sz[1], 3)) + classes = m["model"].get_classes() + assert type(classes) is list + prediction = m["model"](image) + # assert prediction.shape == torch.Size([len(param.channels), im_sz[0], im_sz[1]]) + # assert (prediction <= 1).all() + # assert (0 <= prediction).all() + + +@pytest.mark.parametrize( + "model_name", ["DINO",], +) +def test_feature_extractor(model_name): + param = FeatureExtractorParameter() + param.name = model_name + m = resolve_model(model_name, param) 
+    im_sz = [320, 640]
+    image = cp.random.random((im_sz[0], im_sz[1], 3))
+    prediction = m["model"](image.get())
+    assert prediction.shape == torch.Size([param.dim, im_sz[0], im_sz[1]])
diff --git a/sensor_processing/semantic_sensor/semantic_sensor/utils.py b/sensor_processing/semantic_sensor/semantic_sensor/utils.py
new file mode 100644
index 00000000..9959213f
--- /dev/null
+++ b/sensor_processing/semantic_sensor/semantic_sensor/utils.py
@@ -0,0 +1,28 @@
+import numpy as np
+import cupy as cp
+
+from detectron2.utils.logger import setup_logger
+
+setup_logger()
+
+
+def encode_max(maxim, index):
+    """Pack a float16 score (low 16 bits) and a 16-bit class index (high 16 bits) into one float32."""
+    maxim, index = cp.asarray(maxim, dtype=cp.float32), cp.asarray(index, dtype=cp.uint32)
+    maxim = maxim.astype(cp.float16)
+    maxim = maxim.view(cp.uint16)
+    maxim = maxim.astype(cp.uint32)
+    index = index.astype(cp.uint32)
+    mer = cp.array(cp.left_shift(index, 16) | maxim, dtype=cp.uint32)
+    mer = mer.view(cp.float32)
+    return mer
+
+
+def decode_max(mer):
+    """Invert encode_max: recover the float16 score and the class index from the packed float32."""
+    mer = mer.astype(cp.float32)
+    mer = mer.view(dtype=cp.uint32)
+    ma = cp.bitwise_and(mer, 0xFFFF, dtype=np.uint16)
+    ma = ma.view(np.float16)
+    ma = ma.astype(np.float32)
+    ind = cp.right_shift(mer, 16)
+    return ma, ind
diff --git a/sensor_processing/semantic_sensor/setup.cfg b/sensor_processing/semantic_sensor/setup.cfg
new file mode 100644
index 00000000..269422d8
--- /dev/null
+++ b/sensor_processing/semantic_sensor/setup.cfg
@@ -0,0 +1,4 @@
+[develop]
+script_dir=$base/lib/semantic_sensor
+[install]
+install_scripts=$base/lib/semantic_sensor
\ No newline at end of file
diff --git a/sensor_processing/semantic_sensor/setup.py b/sensor_processing/semantic_sensor/setup.py
new file mode 100644
index 00000000..a6127608
--- /dev/null
+++ b/sensor_processing/semantic_sensor/setup.py
@@ -0,0 +1,33 @@
+from setuptools import setup
+import os
+from glob import glob
+
+package_name = 'semantic_sensor'
+
+setup(
+    name=package_name,
+    version='0.0.0',
+    packages=[package_name],
+    data_files=[
+        ('share/ament_index/resource_index/packages',
+            ['resource/' + package_name]),
+        ('share/' + package_name, ['package.xml']),
+        (os.path.join('share', package_name, 'launch'),
+            glob('launch/*.launch.py')),
+        (os.path.join('share', package_name, 'config'),
+            glob('config/*.yaml'))
+    ],
+    install_requires=['setuptools', 'torch', 'torchvision'],
+    zip_safe=True,
+    maintainer='Gian Erni',
+    maintainer_email='gerni@ethz.ch',
+    description='The semantic_sensor package',
+    license='MIT',
+    tests_require=['pytest'],
+    entry_points={
+        'console_scripts': [
+            'pointcloud_node = semantic_sensor.pointcloud_node:main',
+            'image_node = semantic_sensor.image_node:main',
+        ],
+    },
+)
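For reference, the bit packing used by encode_max/decode_max above can be reproduced with plain
NumPy; the *_np helper names below are illustrative stand-ins for the CuPy versions in utils.py:

    import numpy as np

    def encode_max_np(score, index):
        # float16 score in the low 16 bits, class index in the high 16 bits
        low = score.astype(np.float16).view(np.uint16).astype(np.uint32)
        return ((index.astype(np.uint32) << 16) | low).view(np.float32)

    def decode_max_np(packed):
        bits = packed.view(np.uint32)
        score = (bits & 0xFFFF).astype(np.uint16).view(np.float16).astype(np.float32)
        return score, bits >> 16

    score = np.array([0.875], dtype=np.float32)  # exactly representable in float16
    index = np.array([42], dtype=np.uint32)      # must fit in 16 bits
    s, i = decode_max_np(encode_max_np(score, index))
    assert s[0] == 0.875 and i[0] == 42

Note the two inherent limits of this scheme: scores are rounded to float16 precision, and
indices above 65535 cannot be represented.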