diff --git a/CMakeLists.txt b/CMakeLists.txt index 4bc660649..5edc19844 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -123,6 +123,7 @@ set(RGL_SOURCE_FILES src/graph/FromMat3x4fRaysNode.cpp src/graph/FilterGroundPointsNode.cpp src/graph/RadarPostprocessPointsNode.cpp + src/graph/RadarTrackObjectsNode.cpp src/graph/SetRangeRaysNode.cpp src/graph/SetRaysRingIdsRaysNode.cpp src/graph/SetTimeOffsetsRaysNode.cpp diff --git a/extensions.repos b/extensions.repos index a886638bf..f283c9d2a 100644 --- a/extensions.repos +++ b/extensions.repos @@ -2,7 +2,7 @@ repositories: extensions/udp: type: git url: git@github.com:RobotecAI/RGL-extension-udp.git - version: develop + version: feature/q2-features extensions/snow: type: git diff --git a/include/rgl/api/core.h b/include/rgl/api/core.h index 5693d41d3..fa9153c8a 100644 --- a/include/rgl/api/core.h +++ b/include/rgl/api/core.h @@ -151,6 +151,22 @@ static_assert(std::is_trivial_v); static_assert(std::is_standard_layout_v); #endif +/** + * Radar object class used in object tracking. + */ +typedef enum : int32_t +{ + RGL_RADAR_CLASS_CAR = 0, + RGL_RADAR_CLASS_TRUCK, + RGL_RADAR_CLASS_MOTORCYCLE, + RGL_RADAR_CLASS_BICYCLE, + RGL_RADAR_CLASS_PEDESTRIAN, + RGL_RADAR_CLASS_ANIMAL, + RGL_RADAR_CLASS_HAZARD, + RGL_RADAR_CLASS_UNKNOWN, + RGL_RADAR_CLASS_COUNT +} rgl_radar_object_class_t; + /** * Represents on-GPU Mesh that can be referenced by Entities on the Scene. * Each Mesh can be referenced by any number of Entities on different Scenes. @@ -382,6 +398,16 @@ typedef enum : int32_t RGL_FIELD_DYNAMIC_FORMAT = 13842, } rgl_field_t; +/** + * Kinds of return type for multi-return LiDAR output. 
+ */ +typedef enum : int32_t +{ + RGL_RETURN_TYPE_NOT_DIVERGENT = 0, + RGL_RETURN_TYPE_FIRST = 1, + RGL_RETURN_TYPE_LAST = 2, +} rgl_return_type_t; + /** * Helper enum for axis selection */ @@ -695,6 +721,26 @@ RGL_API rgl_status_t rgl_node_raytrace_configure_distortion(rgl_node_t node, boo */ RGL_API rgl_status_t rgl_node_raytrace_configure_non_hits(rgl_node_t node, float nearDistance, float farDistance); +/** + * Modifies RaytraceNode to apply mask for non-hit values. + * Masked rays will be non-hits and will have the default non-hit values no matter of raytracing result. + * @param node RaytraceNode to modify. + * @param rays_mask Pointer to the array of int32_t. 1 means point is hit, 0 means point is non-hit. + * @param rays_count Number of elements in the `points_mask` array. + */ +RGL_API rgl_status_t rgl_node_raytrace_configure_mask(rgl_node_t node, const int8_t* rays_mask, int32_t rays_count); + +/** + * Modifies RaytraceNode to set beam divergence. + * Beam divergence is used to calculate the beam width at the distance of hit point. + * Setting beam divergence > 0.0f is required to use query for multi-return results. + * Setting beam divergence == 0.0f disables multi-return. + * @param node RaytraceNode to modify. + * @param horizontal_beam_divergence Horizontal beam divergence in radians. + * @param vertical_beam_divergence Vertical beam divergence in radians. + */ +RGL_API rgl_status_t rgl_node_raytrace_configure_beam_divergence(rgl_node_t node, float horizontal_beam_divergence, float vertical_beam_divergence); + /** * Creates or modifies FormatPointsNode. * The Node converts internal representation into a binary format defined by the `fields` array. @@ -819,6 +865,41 @@ RGL_API rgl_status_t rgl_node_points_radar_postprocess(rgl_node_t* node, const r float cumulative_device_gain, float received_noise_mean, float received_noise_st_dev); +/** + * Creates or modifies RadarTrackObjectsNode. 
+ * The Node takes radar detections point cloud as input (from rgl_node_points_radar_postprocess), groups detections into objects based on given thresholds (node parameters), + * and calculates various characteristics of these objects. The output from this Node is in the form of a point cloud, where point XYZ coordinates are tracked objects positions. + * Additionally, cloud points have tracked object IDs. Note that for correct calculation and publishing some of object characteristics (e.g. velocity) user has to call + * rgl_scene_set_time for current scene. + * Graph input: point cloud, containing additionally RGL_FIELD_DISTANCE_F32, RGL_FIELD_AZIMUTH_F32, RGL_FIELD_ELEVATION_F32, RGL_FIELD_RADIAL_SPEED_F32 and ENTITY_ID_I32. + * Graph output: point cloud, contains only XYZ_VEC3_F32 for tracked object positions and ENTITY_ID_I32 for their IDs. + * @param node If (*node) == nullptr, a new Node will be created. Otherwise, (*node) will be modified. + * @param object_distance_threshold The maximum distance between a radar detection and other closest detection classified as the same tracked object (in meters). + * @param object_azimuth_threshold The maximum azimuth difference between a radar detection and other closest detection classified as the same tracked object (in radians). + * @param object_elevation_threshold The maximum elevation difference between a radar detection and other closest detection classified as the same tracked object (in radians). + * @param object_radial_speed_threshold The maximum radial speed difference between a radar detection and other closest detection classified as the same tracked object (in meters per second). + * @param max_matching_distance The maximum distance between predicted (based on previous frame) and newly detected object position to match objects between frames (in meters). 
+ * @param max_prediction_time_frame The maximum time that object state can be predicted until it will be declared lost unless measured again (in milliseconds). + * @param movement_sensitivity The maximum position change for an object to be qualified as stationary (in meters). + */ +RGL_API rgl_status_t rgl_node_points_radar_track_objects(rgl_node_t* node, float object_distance_threshold, + float object_azimuth_threshold, float object_elevation_threshold, + float object_radial_speed_threshold, float max_matching_distance, + float max_prediction_time_frame, float movement_sensitivity); + +/** + * Modifies RadarTrackObjectsNode to set entity ids to radar object classes mapping. + * This is necessary to call for RadarTrackObjectsNode to classify tracked objects correctly. If not set, objects will be classified as RGL_RADAR_CLASS_UNKNOWN by default. + * Note that entity can only belong to a single class - passing entity id multiple times in entity_ids array will result in the last version to be assigned. There is no + * limit on how many entities can have the same class. + * @param node RadarTrackObjectsNode to modify. + * @param entity_ids Array of RGL entity ids. + * @param object_classes Array of radar object classes. + * @param count Number of elements in entity_ids and object_classes arrays. + */ +RGL_API rgl_status_t rgl_node_points_radar_set_classes(rgl_node_t node, const int32_t* entity_ids, + const rgl_radar_object_class_t* object_classes, int32_t count); + /** * Creates or modifies FilterGroundPointsNode. * The Node adds RGL_FIELD_IS_GROUND_I32 which indicates the point is on the ground. Points are not removed. 
@@ -879,6 +960,17 @@ RGL_API rgl_status_t rgl_node_gaussian_noise_angular_hitpoint(rgl_node_t* node, RGL_API rgl_status_t rgl_node_gaussian_noise_distance(rgl_node_t* node, float mean, float st_dev_base, float st_dev_rise_per_meter); +/** + * Creates or modifies MultiReturnSwitchNode + * This is a special node which does not modify the data but acts as an adapter to the multi-return feature. + * Thanks to this node, user can attach unchanged pipelines to work with specific return type from multi-return raytracing. + * Graph input: point cloud (with multi-return fields) + * Graph output: point cloud (with a selected field from parent's multi-return point cloud) + * @param node If (*node) == nullptr, a new Node will be created. Otherwise, (*node) will be modified. + * @param return_type Return type to select from multi-return point cloud. + */ +RGL_API rgl_status_t rgl_node_multi_return_switch(rgl_node_t* node, rgl_return_type_t); + /** * Assigns value true to out_alive if the given node is known and has not been destroyed, * assigns value false otherwise. diff --git a/src/GPUFieldDescBuilder.hpp b/src/GPUFieldDescBuilder.hpp index 04b80cf51..24f429777 100644 --- a/src/GPUFieldDescBuilder.hpp +++ b/src/GPUFieldDescBuilder.hpp @@ -19,8 +19,6 @@ #include #include -#include -#include #include // Builder for GPUFieldDesc. Separated struct to avoid polluting gpu-visible header (gpu/GPUFieldDesc.hpp). 
diff --git a/src/Time.hpp b/src/Time.hpp index 8fc155e54..fe2f31730 100644 --- a/src/Time.hpp +++ b/src/Time.hpp @@ -31,6 +31,7 @@ struct Time static Time nanoseconds(uint64_t nanoseconds) { return Time(nanoseconds); } HostDevFn double asSeconds() const { return static_cast(timeNs) * 1.0e-9; }; + HostDevFn double asMilliseconds() const { return static_cast(timeNs) * 1.0e-6; }; HostDevFn uint64_t asNanoseconds() const { return timeNs; } #if RGL_BUILD_ROS2_EXTENSION diff --git a/src/api/apiCore.cpp b/src/api/apiCore.cpp index 76030574f..d5b60c5fa 100644 --- a/src/api/apiCore.cpp +++ b/src/api/apiCore.cpp @@ -919,6 +919,51 @@ void TapeCore::tape_node_raytrace_configure_non_hits(const YAML::Node& yamlNode, rgl_node_raytrace_configure_non_hits(node, yamlNode[1].as(), yamlNode[2].as()); } +RGL_API rgl_status_t rgl_node_raytrace_configure_mask(rgl_node_t node, const int8_t* rays_mask, int32_t rays_count) +{ + auto status = rglSafeCall([&]() { + RGL_API_LOG("rgl_node_raytrace_configure_mask(node={}, rays_mask={}, rays_count={})", repr(node), + repr(rays_mask, rays_count), rays_count); + CHECK_ARG(node != nullptr); + CHECK_ARG(rays_mask != nullptr); + CHECK_ARG(rays_count > 0); + RaytraceNode::Ptr raytraceNode = Node::validatePtr(node); + raytraceNode->setNonHitsMask(rays_mask, rays_count); + }); + TAPE_HOOK(node, TAPE_ARRAY(rays_mask, rays_count), rays_count); + return status; +} + +void TapeCore::tape_node_raytrace_configure_mask(const YAML::Node& yamlNode, PlaybackState& state) +{ + auto nodeId = yamlNode[0].as(); + rgl_node_t node = state.nodes.at(nodeId); + rgl_node_raytrace_configure_mask(node, state.getPtr(yamlNode[1]), yamlNode[2].as()); +} + +RGL_API rgl_status_t rgl_node_raytrace_configure_beam_divergence(rgl_node_t node, float horizontal_beam_divergence, + float vertical_beam_divergence) +{ + auto status = rglSafeCall([&]() { + RGL_API_LOG("rgl_node_raytrace_configure_beam_divergence(node={}, horizontal_divergence={}, vertical_divergence={})", repr(node), + 
horizontal_beam_divergence, vertical_beam_divergence); + CHECK_ARG(node != nullptr); + CHECK_ARG((horizontal_beam_divergence > 0.0f && vertical_beam_divergence > 0.0f) || + (horizontal_beam_divergence == 0.0f && vertical_beam_divergence == 0.0f)); + RaytraceNode::Ptr raytraceNode = Node::validatePtr(node); + raytraceNode->setBeamDivergence(horizontal_beam_divergence, vertical_beam_divergence); + }); + TAPE_HOOK(node, horizontal_beam_divergence, vertical_beam_divergence); + return status; +} + +void TapeCore::tape_node_raytrace_configure_beam_divergence(const YAML::Node& yamlNode, PlaybackState& state) +{ + auto nodeId = yamlNode[0].as(); + rgl_node_t node = state.nodes.at(nodeId); + rgl_node_raytrace_configure_beam_divergence(node, yamlNode[1].as(), yamlNode[2].as()); +} + RGL_API rgl_status_t rgl_node_points_format(rgl_node_t* node, const rgl_field_t* fields, int32_t field_count) { auto status = rglSafeCall([&]() { @@ -1131,6 +1176,71 @@ void TapeCore::tape_node_points_radar_postprocess(const YAML::Node& yamlNode, Pl state.nodes.insert({nodeId, node}); } +RGL_API rgl_status_t rgl_node_points_radar_track_objects(rgl_node_t* node, float object_distance_threshold, + float object_azimuth_threshold, float object_elevation_threshold, + float object_radial_speed_threshold, float max_matching_distance, + float max_prediction_time_frame, float movement_sensitivity) +{ + auto status = rglSafeCall([&]() { + RGL_API_LOG("rgl_node_points_radar_track_objects(node={}, object_distance_threshold={}, object_azimuth_threshold={}, " + "object_elevation_threshold={}, object_radial_speed_threshold={}, max_matching_distance={}, " + "max_prediction_time_frame={}, movement_sensitivity={})", + repr(node), object_distance_threshold, object_azimuth_threshold, object_elevation_threshold, + object_radial_speed_threshold, max_matching_distance, max_prediction_time_frame, movement_sensitivity); + CHECK_ARG(node != nullptr); + CHECK_ARG(object_distance_threshold > 0.0f); + 
CHECK_ARG(object_azimuth_threshold > 0.0f); + CHECK_ARG(object_elevation_threshold > 0.0f); + CHECK_ARG(object_radial_speed_threshold > 0.0f); + CHECK_ARG(max_matching_distance > 0.0f); + CHECK_ARG(max_prediction_time_frame > 0.0f); + CHECK_ARG(movement_sensitivity >= 0.0f); + + createOrUpdateNode(node, object_distance_threshold, object_azimuth_threshold, + object_elevation_threshold, object_radial_speed_threshold, + max_matching_distance, max_prediction_time_frame, movement_sensitivity); + }); + TAPE_HOOK(node, object_distance_threshold, object_azimuth_threshold, object_elevation_threshold, + object_radial_speed_threshold, max_matching_distance, max_prediction_time_frame, movement_sensitivity); + return status; +} + +void TapeCore::tape_node_points_radar_track_objects(const YAML::Node& yamlNode, PlaybackState& state) +{ + auto nodeId = yamlNode[0].as(); + rgl_node_t node = state.nodes.contains(nodeId) ? state.nodes.at(nodeId) : nullptr; + rgl_node_points_radar_track_objects(&node, yamlNode[1].as(), yamlNode[2].as(), yamlNode[3].as(), + yamlNode[4].as(), yamlNode[5].as(), yamlNode[6].as(), + yamlNode[7].as()); + state.nodes.insert({nodeId, node}); +} + +RGL_API rgl_status_t rgl_node_points_radar_set_classes(rgl_node_t node, const int32_t* entity_ids, + const rgl_radar_object_class_t* object_classes, int32_t count) +{ + auto status = rglSafeCall([&]() { + RGL_API_LOG("rgl_node_points_radar_set_classes(node={}, entity_ids={}, classes={}, count={})", repr(node), + repr(entity_ids, count), repr(object_classes, count), count); + CHECK_ARG(node != nullptr); + CHECK_ARG(entity_ids != nullptr); + CHECK_ARG(object_classes != nullptr); + CHECK_ARG(count >= 0); + RadarTrackObjectsNode::Ptr trackObjectsNode = Node::validatePtr(node); + trackObjectsNode->setObjectClasses(entity_ids, object_classes, count); + }); + TAPE_HOOK(node, TAPE_ARRAY(entity_ids, count), TAPE_ARRAY(object_classes, count), count); + return status; +} + +void 
TapeCore::tape_node_points_radar_set_classes(const YAML::Node& yamlNode, PlaybackState& state) +{ + auto nodeId = yamlNode[0].as(); + rgl_node_t node = state.nodes.contains(nodeId) ? state.nodes.at(nodeId) : nullptr; + rgl_node_points_radar_set_classes(node, state.getPtr(yamlNode[1]), + state.getPtr(yamlNode[2]), yamlNode[3].as()); + state.nodes.insert({nodeId, node}); +} + RGL_API rgl_status_t rgl_node_points_filter_ground(rgl_node_t* node, const rgl_vec3f* sensor_up_vector, float ground_angle_threshold) { @@ -1230,6 +1340,27 @@ void TapeCore::tape_node_gaussian_noise_distance(const YAML::Node& yamlNode, Pla state.nodes.insert({nodeId, node}); } +RGL_API rgl_status_t rgl_node_multi_return_switch(rgl_node_t* node, rgl_return_type_t return_type) +{ + auto status = rglSafeCall([&]() { + RGL_API_LOG("rgl_node_multi_return_switch(node={}, return_type={})", repr(node), return_type); + CHECK_ARG(node != nullptr); + + createOrUpdateNode(node, return_type); + }); + TAPE_HOOK(node, return_type); + return status; +} + +void TapeCore::tape_node_multi_return_switch(const YAML::Node& yamlNode, PlaybackState& state) +{ + auto nodeId = yamlNode[0].as(); + auto return_type = static_cast(yamlNode[1].as()); + rgl_node_t node = state.nodes.contains(nodeId) ? state.nodes.at(nodeId) : nullptr; + rgl_node_multi_return_switch(&node, return_type); + state.nodes.insert({nodeId, node}); +} + rgl_status_t rgl_node_is_alive(rgl_node_t node, bool* out_alive) { auto status = rglSafeCall([&]() { diff --git a/src/gpu/MultiReturn.hpp b/src/gpu/MultiReturn.hpp new file mode 100644 index 000000000..0d3b34e7d --- /dev/null +++ b/src/gpu/MultiReturn.hpp @@ -0,0 +1,28 @@ +// Copyright 2024 Robotec.AI +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include + +#define MULTI_RETURN_BEAM_VERTICES 19 +#define MULTI_RETURN_BEAM_LAYERS 3 +#define MULTI_RETURN_BEAM_SAMPLES (1 + (MULTI_RETURN_BEAM_VERTICES * MULTI_RETURN_BEAM_LAYERS)) + +struct MultiReturnPointers +{ + Field::type* isHit; + Field::type* xyz; + Field::type* distance; +}; diff --git a/src/gpu/RaytraceRequestContext.hpp b/src/gpu/RaytraceRequestContext.hpp index b35554392..4ec29f791 100644 --- a/src/gpu/RaytraceRequestContext.hpp +++ b/src/gpu/RaytraceRequestContext.hpp @@ -16,6 +16,7 @@ #include #include +#include struct RaytraceRequestContext { @@ -27,7 +28,7 @@ struct RaytraceRequestContext float nearNonHitDistance; float farNonHitDistance; - const Mat3x4f* rays; + const Mat3x4f* raysWorld; size_t rayCount; Mat3x4f rayOriginToWorld; @@ -38,9 +39,11 @@ struct RaytraceRequestContext const int* ringIds; size_t ringIdsCount; - const float* rayTimeOffsets; + const float* rayTimeOffsetsMs; size_t rayTimeOffsetsCount; + const int8_t* rayMask; + OptixTraversableHandle scene; double sceneTime; float sceneDeltaTime; @@ -61,5 +64,10 @@ struct RaytraceRequestContext Field::type* elevation; Field::type* normal; Field::type* incidentAngle; + + // Multi-Return + float hBeamHalfDivergenceRad; + float vBeamHalfDivergenceRad; + MultiReturnPointers mrSamples; }; static_assert(std::is_trivially_copyable::value); diff --git a/src/gpu/nodeKernels.cu b/src/gpu/nodeKernels.cu index 63a2e98b0..ba45d5960 100644 --- a/src/gpu/nodeKernels.cu +++ b/src/gpu/nodeKernels.cu @@ -223,6 +223,40 @@ __global__ void kFilterGroundPoints(size_t 
pointCount, const Vec3f sensor_up_vec outNonGround[tid] = normalUpAngle > ground_angle_threshold; } +__global__ void kProcessBeamSamplesFirstLast(size_t beamCount, int samplesPerBeam, MultiReturnPointers beamSamples, + MultiReturnPointers first, MultiReturnPointers last, const Mat3x4f* beamsWorld) +{ + LIMIT(beamCount); + + const auto beamIdx = tid; + int firstIdx = -1; + int lastIdx = -1; + for (int sampleIdx = 0; sampleIdx < samplesPerBeam; ++sampleIdx) { + if (beamSamples.isHit[beamIdx * samplesPerBeam + sampleIdx] == 0) { + continue; + } + auto currentFirstDistance = firstIdx >= 0 ? beamSamples.distance[beamIdx * samplesPerBeam + firstIdx] : FLT_MAX; + auto currentLastDistance = lastIdx >= 0 ? beamSamples.distance[beamIdx * samplesPerBeam + lastIdx] : -FLT_MAX; + if (beamSamples.distance[beamIdx * samplesPerBeam + sampleIdx] < currentFirstDistance) { + firstIdx = sampleIdx; + } + if (beamSamples.distance[beamIdx * samplesPerBeam + sampleIdx] > currentLastDistance) { + lastIdx = sampleIdx; + } + } + Vec3f beamOrigin = beamsWorld[beamIdx] * Vec3f{0, 0, 0}; + Vec3f beamDir = ((beamsWorld[beamIdx] * Vec3f{0, 0, 1}) - beamOrigin).normalized(); + bool isHit = firstIdx >= 0; // Note that firstHit >= 0 implies lastHit >= 0 + first.isHit[beamIdx] = isHit; + last.isHit[beamIdx] = isHit; + if (isHit) { + first.distance[beamIdx] = beamSamples.distance[beamIdx * samplesPerBeam + firstIdx]; + last.distance[beamIdx] = beamSamples.distance[beamIdx * samplesPerBeam + lastIdx]; + first.xyz[beamIdx] = beamOrigin + beamDir * first.distance[beamIdx]; + last.xyz[beamIdx] = beamOrigin + beamDir * last.distance[beamIdx]; + } +} + void gpuFindCompaction(cudaStream_t stream, size_t pointCount, const int32_t* shouldCompact, CompactionIndexType* hitCountInclusive, size_t* outHitCount) @@ -295,3 +329,9 @@ void gpuRadarComputeEnergy(cudaStream_t stream, size_t count, float rayAzimuthSt run(kRadarComputeEnergy, stream, count, rayAzimuthStepRad, rayElevationStepRad, freq, 
lookAtOriginTransform, rayPose, hitDist, hitNorm, hitPos, outBUBRFactor); } + +void gpuProcessBeamSamplesFirstLast(cudaStream_t stream, size_t beamCount, int samplesPerBeam, MultiReturnPointers beamSamples, + MultiReturnPointers first, MultiReturnPointers last, const Mat3x4f* beamsWorld) +{ + run(kProcessBeamSamplesFirstLast, stream, beamCount, samplesPerBeam, beamSamples, first, last, beamsWorld); +} diff --git a/src/gpu/nodeKernels.hpp b/src/gpu/nodeKernels.hpp index f05cfd602..0703aa9fb 100644 --- a/src/gpu/nodeKernels.hpp +++ b/src/gpu/nodeKernels.hpp @@ -22,6 +22,7 @@ #include #include #include +#include /* * The following functions are asynchronous! @@ -50,4 +51,6 @@ void gpuFilterGroundPoints(cudaStream_t stream, size_t pointCount, const Vec3f s void gpuRadarComputeEnergy(cudaStream_t stream, size_t count, float rayAzimuthStepRad, float rayElevationStepRad, float freq, Mat3x4f lookAtOriginTransform, const Field::type* rayPose, const Field::type* hitDist, const Field::type* hitNorm, - const Field::type* hitPos, Vector<3, thrust::complex>* outBUBRFactor); \ No newline at end of file + const Field::type* hitPos, Vector<3, thrust::complex>* outBUBRFactor); +void gpuProcessBeamSamplesFirstLast(cudaStream_t stream, size_t beamCount, int samplesPerBeam, MultiReturnPointers beamSamples, + MultiReturnPointers first, MultiReturnPointers last, const Mat3x4f* beamWorld); \ No newline at end of file diff --git a/src/gpu/optixPrograms.cu b/src/gpu/optixPrograms.cu index 0a1418eca..1f4b131fb 100644 --- a/src/gpu/optixPrograms.cu +++ b/src/gpu/optixPrograms.cu @@ -26,105 +26,39 @@ #include +// Constants static constexpr float toDeg = (180.0f / M_PI); +// Globals extern "C" static __constant__ RaytraceRequestContext ctx; -struct Vec3fPayload -{ - unsigned p0; - unsigned p1; - unsigned p2; -}; - -__forceinline__ __device__ Vec3fPayload encodePayloadVec3f(const Vec3f& src) -{ - Vec3fPayload dst; - dst.p0 = *(reinterpret_cast(&src[0])); - dst.p1 = *(reinterpret_cast(&src[1])); 
- dst.p2 = *(reinterpret_cast(&src[2])); - return dst; -} - -__forceinline__ __device__ Vec3f decodePayloadVec3f(const Vec3fPayload& src) -{ - return Vec3f{ - *reinterpret_cast(&src.p0), - *reinterpret_cast(&src.p1), - *reinterpret_cast(&src.p2), - }; -} - +// Helper functions template -__forceinline__ __device__ void saveRayResult(const Vec3f& xyz, float distance, float intensity, const int objectID, - const Vec3f& absVelocity, const Vec3f& relVelocity, float radialSpeed, - const Vec3f& normal, float incidentAngle) -{ - const int rayIdx = optixGetLaunchIndex().x; - if (ctx.xyz != nullptr) { - // Return actual XYZ of the hit point or infinity vector. - ctx.xyz[rayIdx] = xyz; - } - if (ctx.isHit != nullptr) { - ctx.isHit[rayIdx] = isFinite; - } - if (ctx.rayIdx != nullptr) { - ctx.rayIdx[rayIdx] = rayIdx; - } - if (ctx.ringIdx != nullptr && ctx.ringIds != nullptr) { - ctx.ringIdx[rayIdx] = ctx.ringIds[rayIdx % ctx.ringIdsCount]; - } - if (ctx.distance != nullptr) { - ctx.distance[rayIdx] = distance; - } - if (ctx.intensity != nullptr) { - ctx.intensity[rayIdx] = intensity; - } - if (ctx.timestamp != nullptr) { - ctx.timestamp[rayIdx] = ctx.sceneTime; - } - if (ctx.entityId != nullptr) { - ctx.entityId[rayIdx] = isFinite ? 
objectID : RGL_ENTITY_INVALID_ID; - } - if (ctx.pointAbsVelocity != nullptr) { - ctx.pointAbsVelocity[rayIdx] = absVelocity; - } - if (ctx.pointRelVelocity != nullptr) { - ctx.pointRelVelocity[rayIdx] = relVelocity; - } - if (ctx.radialSpeed != nullptr) { - ctx.radialSpeed[rayIdx] = radialSpeed; - } - if (ctx.normal != nullptr) { - ctx.normal[rayIdx] = normal; - } - if (ctx.incidentAngle != nullptr) { - ctx.incidentAngle[rayIdx] = incidentAngle; - } -} - -__forceinline__ __device__ void saveNonHitRayResult(float nonHitDistance) -{ - Mat3x4f ray = ctx.rays[optixGetLaunchIndex().x]; - Vec3f origin = ray * Vec3f{0, 0, 0}; - Vec3f dir = ray * Vec3f{0, 0, 1} - origin; - Vec3f displacement = dir.normalized() * nonHitDistance; - displacement = {isnan(displacement.x()) ? 0 : displacement.x(), isnan(displacement.y()) ? 0 : displacement.y(), - isnan(displacement.z()) ? 0 : displacement.z()}; - Vec3f xyz = origin + displacement; - saveRayResult(xyz, nonHitDistance, 0, RGL_ENTITY_INVALID_ID, Vec3f{NAN}, Vec3f{NAN}, 0.001f, Vec3f{NAN}, NAN); -} +__device__ void saveRayResult(const Vec3f& xyz, float distance, float intensity, int objectID, const Vec3f& absVelocity, + const Vec3f& relVelocity, float radialSpeed, const Vec3f& normal, float incidentAngle); +__device__ void saveNonHitRayResult(float nonHitDistance); +__device__ void shootSamplingRay(const Mat3x4f& ray, float maxRange, unsigned sampleBeamIdx); +__device__ Mat3x4f makeBeamSampleRayTransform(float hHalfDivergenceRad, float vHalfDivergenceRad, unsigned layerIdx, unsigned vertexIdx); extern "C" __global__ void __raygen__() { if (ctx.scene == 0) { + saveNonHitRayResult( + ctx.farNonHitDistance); // TODO(prybicki): Remove this, assume that host code will not pass invalid scene. 
+ return; + } + + const int rayIdx = static_cast(optixGetLaunchIndex().x); + + if (ctx.rayMask != nullptr && ctx.rayMask[rayIdx] == 0) { saveNonHitRayResult(ctx.farNonHitDistance); return; } - const int rayIdx = optixGetLaunchIndex().x; - Mat3x4f ray = ctx.rays[rayIdx]; - const Mat3x4f rayLocal = ctx.rayOriginToWorld.inverse() * ray; + Mat3x4f ray = ctx.raysWorld[rayIdx]; + const Mat3x4f rayLocal = + ctx.rayOriginToWorld.inverse() * + ray; // TODO(prybicki): instead of computing inverse, we should pass rays in local CF and then transform them to world CF. // Assuming up vector is Y, forward vector is Z (true for Unity). // TODO(msz-rai): allow to define up and forward vectors in RGL @@ -141,69 +75,95 @@ extern "C" __global__ void __raygen__() // Velocities are in the local frame. Need to operate on rays in local frame. // Ray time offsets are in milliseconds, velocities are in unit per seconds. // In order to not lose numerical precision, first multiply values and then convert to proper unit. - ray = Mat3x4f::TRS((ctx.rayTimeOffsets[rayIdx] * ctx.sensorLinearVelocityXYZ) * 0.001f, - (ctx.rayTimeOffsets[rayIdx] * (ctx.sensorAngularVelocityRPY * toDeg)) * 0.001f) * + ray = Mat3x4f::TRS((ctx.rayTimeOffsetsMs[rayIdx] * ctx.sensorLinearVelocityXYZ) * 0.001f, + (ctx.rayTimeOffsetsMs[rayIdx] * (ctx.sensorAngularVelocityRPY * toDeg)) * 0.001f) * rayLocal; // Back to the global frame. ray = ctx.rayOriginToWorld * ray; } - Vec3f origin = ray * Vec3f{0, 0, 0}; - Vec3f dir = ray * Vec3f{0, 0, 1} - origin; float maxRange = ctx.rayRangesCount == 1 ? 
ctx.rayRanges[0].y() : ctx.rayRanges[rayIdx].y(); - unsigned int flags = OPTIX_RAY_FLAG_DISABLE_ANYHIT; - Vec3fPayload originPayload = encodePayloadVec3f(origin); - optixTrace(ctx.scene, origin, dir, 0.0f, maxRange, 0.0f, OptixVisibilityMask(255), flags, 0, 1, 0, originPayload.p0, - originPayload.p1, originPayload.p2); + shootSamplingRay(ray, maxRange, 0); // Shoot primary ray + if (ctx.hBeamHalfDivergenceRad > 0.0f && ctx.vBeamHalfDivergenceRad > 0.0f) { + // Shoot multi-return sampling rays + for (int layerIdx = 0; layerIdx < MULTI_RETURN_BEAM_LAYERS; ++layerIdx) { + for (int vertexIdx = 0; vertexIdx < MULTI_RETURN_BEAM_VERTICES; vertexIdx++) { + Mat3x4f sampleRay = ray * makeBeamSampleRayTransform(ctx.hBeamHalfDivergenceRad, ctx.vBeamHalfDivergenceRad, layerIdx, vertexIdx); + // Sampling rays indexes start from 1, 0 is reserved for the primary ray + const unsigned beamSampleRayIdx = 1 + layerIdx * MULTI_RETURN_BEAM_VERTICES + vertexIdx; + shootSamplingRay(sampleRay, maxRange, beamSampleRayIdx); + } + } + } +} + +extern "C" __global__ void __miss__() +{ + const int beamIdx = static_cast(optixGetLaunchIndex().x); + const int beamSampleIdx = static_cast(optixGetPayload_0()); + ctx.mrSamples.isHit[beamIdx * MULTI_RETURN_BEAM_SAMPLES + beamSampleIdx] = false; + if (beamSampleIdx == 0) { + saveNonHitRayResult(ctx.farNonHitDistance); + } } extern "C" __global__ void __closesthit__() { - const EntitySBTData& entityData = *(const EntitySBTData*) optixGetSbtDataPointer(); + const EntitySBTData& entityData = *(reinterpret_cast(optixGetSbtDataPointer())); - const int primID = optixGetPrimitiveIndex(); + // Triangle + const int primID = static_cast(optixGetPrimitiveIndex()); assert(primID < entityData.indexCount); const Vec3i triangleIndices = entityData.index[primID]; const float u = optixGetTriangleBarycentrics().x; const float v = optixGetTriangleBarycentrics().y; - assert(triangleIndices.x() < entityData.vertexCount); assert(triangleIndices.y() < entityData.vertexCount); 
assert(triangleIndices.z() < entityData.vertexCount); - const Vec3f& A = entityData.vertex[triangleIndices.x()]; const Vec3f& B = entityData.vertex[triangleIndices.y()]; const Vec3f& C = entityData.vertex[triangleIndices.z()]; - Vec3f hitObject = Vec3f((1 - u - v) * A + u * B + v * C); - Vec3f hitWorld = optixTransformPointFromObjectToWorldSpace(hitObject); - - int objectID = optixGetInstanceId(); - - Vec3f origin = decodePayloadVec3f({optixGetPayload_0(), optixGetPayload_1(), optixGetPayload_2()}); + // Ray + const int beamIdx = static_cast(optixGetLaunchIndex().x); + const unsigned beamSampleRayIdx = optixGetPayload_0(); + const unsigned circleIdx = (beamSampleRayIdx - 1) / MULTI_RETURN_BEAM_VERTICES; + const unsigned vertexIdx = (beamSampleRayIdx - 1) % MULTI_RETURN_BEAM_VERTICES; + const unsigned mrSamplesIdx = beamIdx * MULTI_RETURN_BEAM_SAMPLES + beamSampleRayIdx; + const Vec3f beamSampleOrigin = optixGetWorldRayOrigin(); + const int entityId = static_cast(optixGetInstanceId()); - // TODO: Optimization - we can use inversesqrt here, which is one operation cheaper then sqrt. - float distance = sqrt(pow((hitWorld)[0] - (origin)[0], 2) + pow((hitWorld)[1] - (origin)[1], 2) + - pow((hitWorld)[2] - (origin)[2], 2)); + // Hitpoint + Vec3f hitObject = Vec3f((1 - u - v) * A + u * B + v * C); + const Vec3f hitWorldRaytraced = optixTransformPointFromObjectToWorldSpace(hitObject); + const Vector<3, double> hwrd = hitWorldRaytraced; + const Vector<3, double> hso = beamSampleOrigin; + const double distance = (hwrd - hso).length(); + const Vec3f hitWorldSeenBySensor = [&]() { + if (!ctx.doApplyDistortion) { + return hitWorldRaytraced; + } + Mat3x4f sampleRayTf = beamSampleRayIdx == 0 ? 
Mat3x4f::identity() : + makeBeamSampleRayTransform(ctx.hBeamHalfDivergenceRad, ctx.vBeamHalfDivergenceRad, circleIdx, vertexIdx); + Mat3x4f undistortedRay = ctx.raysWorld[beamIdx] * sampleRayTf; + Vec3f undistortedOrigin = undistortedRay * Vec3f{0, 0, 0}; + Vec3f undistortedDir = undistortedRay * Vec3f{0, 0, 1} - undistortedOrigin; + return undistortedOrigin + undistortedDir * distance; + }(); + // Early out for points that are too close to the sensor float minRange = ctx.rayRangesCount == 1 ? ctx.rayRanges[0].x() : ctx.rayRanges[optixGetLaunchIndex().x].x(); if (distance < minRange) { - saveNonHitRayResult(ctx.nearNonHitDistance); + ctx.mrSamples.isHit[mrSamplesIdx] = false; + if (beamSampleRayIdx == 0) { + saveNonHitRayResult(ctx.nearNonHitDistance); + } return; } - // Fix XYZ if distortion is applied (XYZ must be calculated in sensor coordinate frame) - if (ctx.doApplyDistortion) { - const int rayIdx = optixGetLaunchIndex().x; - Mat3x4f undistortedRay = ctx.rays[rayIdx]; - Vec3f undistortedOrigin = undistortedRay * Vec3f{0, 0, 0}; - Vec3f undistortedDir = undistortedRay * Vec3f{0, 0, 1} - undistortedOrigin; - hitWorld = undistortedOrigin + undistortedDir * distance; - } - // Normal vector and incident angle - Vec3f rayDir = (hitWorld - origin).normalized(); + Vec3f rayDir = optixGetWorldRayDirection(); const Vec3f wA = optixTransformPointFromObjectToWorldSpace(A); const Vec3f wB = optixTransformPointFromObjectToWorldSpace(B); const Vec3f wC = optixTransformPointFromObjectToWorldSpace(C); @@ -228,6 +188,13 @@ extern "C" __global__ void __closesthit__() intensity = tex2D(entityData.texture, uv[0], uv[1]); } + // Save sub-sampling results + ctx.mrSamples.isHit[mrSamplesIdx] = true; + ctx.mrSamples.distance[mrSamplesIdx] = distance; + if (beamSampleRayIdx != 0) { + return; + } + Vec3f absPointVelocity{NAN}; Vec3f relPointVelocity{NAN}; float radialSpeed{NAN}; @@ -243,7 +210,7 @@ extern "C" __global__ void __closesthit__() // Then, we can connect marker dot in 
previous raytracing frame with its current position and obtain displacementFromTransformChange vector // Dividing displacementFromTransformChange by time elapsed from the previous raytracing frame yields velocity vector. Vec3f displacementVectorOrigin = entityData.prevFrameLocalToWorld * hitObject; - displacementFromTransformChange = hitWorld - displacementVectorOrigin; + displacementFromTransformChange = hitWorldRaytraced - displacementVectorOrigin; } // Some entities may have skinned meshes - in this case entity.vertexDisplacementSincePrevFrame will be non-null @@ -266,17 +233,100 @@ extern "C" __global__ void __closesthit__() Vec3f absPointVelocityInSensorFrame = ctx.rayOriginToWorld.rotation().inverse() * absPointVelocity; Vec3f relPointVelocityBasedOnSensorLinearVelocity = absPointVelocityInSensorFrame - ctx.sensorLinearVelocityXYZ; - Vec3f hitRays = ctx.rayOriginToWorld.inverse() * hitWorld; + Vec3f hitRays = ctx.rayOriginToWorld.inverse() * hitWorldRaytraced; Vec3f relPointVelocityBasedOnSensorAngularVelocity = Vec3f(.0f) - ctx.sensorAngularVelocityRPY.cross(hitRays); relPointVelocity = relPointVelocityBasedOnSensorLinearVelocity + relPointVelocityBasedOnSensorAngularVelocity; radialSpeed = hitRays.normalized().dot(relPointVelocity); } + saveRayResult(hitWorldSeenBySensor, distance, intensity, entityId, absPointVelocity, relPointVelocity, radialSpeed, + wNormal, incidentAngle); +} + +extern "C" __global__ void __anyhit__() {} + +// Helper functions implementations - saveRayResult(hitWorld, distance, intensity, objectID, absPointVelocity, relPointVelocity, radialSpeed, wNormal, - incidentAngle); +__device__ void shootSamplingRay(const Mat3x4f& ray, float maxRange, unsigned int beamSampleRayIdx) +{ + Vec3f origin = ray * Vec3f{0, 0, 0}; + Vec3f dir = ray * Vec3f{0, 0, 1} - origin; + const unsigned int flags = OPTIX_RAY_FLAG_DISABLE_ANYHIT; // TODO: try adding OPTIX_RAY_FLAG_CULL_BACK_FACING_TRIANGLES + optixTrace(ctx.scene, origin, dir, 0.0f, maxRange, 0.0f, 
OptixVisibilityMask(255), flags, 0, 1, 0, beamSampleRayIdx); } -extern "C" __global__ void __miss__() { saveNonHitRayResult(ctx.farNonHitDistance); } +__device__ Mat3x4f makeBeamSampleRayTransform(float hHalfDivergenceRad, float vHalfDivergenceRad, unsigned int layerIdx, unsigned int vertexIdx) +{ + if (ctx.hBeamHalfDivergenceRad == 0.0f && ctx.vBeamHalfDivergenceRad == 0.0f) { + return Mat3x4f::identity(); + } -extern "C" __global__ void __anyhit__() {} + const float hCurrentHalfDivergenceRad = hHalfDivergenceRad * (1.0f - static_cast(layerIdx) / MULTI_RETURN_BEAM_LAYERS); + const float vCurrentHalfDivergenceRad = vHalfDivergenceRad * (1.0f - static_cast(layerIdx) / MULTI_RETURN_BEAM_LAYERS); + + const float angleStep = 2.0f * static_cast(M_PI) / MULTI_RETURN_BEAM_VERTICES; + + const float hAngle = hCurrentHalfDivergenceRad * cos(static_cast(vertexIdx) * angleStep); + const float vAngle = vCurrentHalfDivergenceRad * sin(static_cast(vertexIdx) * angleStep); + + return Mat3x4f::rotationRad(vAngle, 0.0f, 0.0f) * Mat3x4f::rotationRad(0.0f, hAngle, 0.0f); +} + +__device__ void saveNonHitRayResult(float nonHitDistance) +{ + Mat3x4f ray = ctx.raysWorld[optixGetLaunchIndex().x]; + Vec3f origin = ray * Vec3f{0, 0, 0}; + Vec3f dir = ray * Vec3f{0, 0, 1} - origin; + Vec3f displacement = dir.normalized() * nonHitDistance; + displacement = {isnan(displacement.x()) ? 0 : displacement.x(), isnan(displacement.y()) ? 0 : displacement.y(), + isnan(displacement.z()) ? 
0 : displacement.z()}; + Vec3f xyz = origin + displacement; + saveRayResult(xyz, nonHitDistance, 0, RGL_ENTITY_INVALID_ID, Vec3f{NAN}, Vec3f{NAN}, 0.001f, Vec3f{NAN}, NAN); +} + +template +__device__ void saveRayResult(const Vec3f& xyz, float distance, float intensity, const int objectID, const Vec3f& absVelocity, + const Vec3f& relVelocity, float radialSpeed, const Vec3f& normal, float incidentAngle) +{ + const int beamIdx = static_cast(optixGetLaunchIndex().x); + if (ctx.xyz != nullptr) { + // Return actual XYZ of the hit point or infinity vector. + ctx.xyz[beamIdx] = xyz; + } + if (ctx.isHit != nullptr) { + ctx.isHit[beamIdx] = isFinite; + } + if (ctx.rayIdx != nullptr) { + ctx.rayIdx[beamIdx] = beamIdx; + } + if (ctx.ringIdx != nullptr && ctx.ringIds != nullptr) { + ctx.ringIdx[beamIdx] = ctx.ringIds[beamIdx % ctx.ringIdsCount]; + } + if (ctx.distance != nullptr) { + ctx.distance[beamIdx] = distance; + } + if (ctx.intensity != nullptr) { + ctx.intensity[beamIdx] = intensity; + } + if (ctx.timestamp != nullptr) { + ctx.timestamp[beamIdx] = ctx.sceneTime; + } + if (ctx.entityId != nullptr) { + ctx.entityId[beamIdx] = isFinite ? 
objectID : RGL_ENTITY_INVALID_ID; + } + if (ctx.pointAbsVelocity != nullptr) { + ctx.pointAbsVelocity[beamIdx] = absVelocity; + } + if (ctx.pointRelVelocity != nullptr) { + ctx.pointRelVelocity[beamIdx] = relVelocity; + } + if (ctx.radialSpeed != nullptr) { + ctx.radialSpeed[beamIdx] = radialSpeed; + } + if (ctx.normal != nullptr) { + ctx.normal[beamIdx] = normal; + } + if (ctx.incidentAngle != nullptr) { + ctx.incidentAngle[beamIdx] = incidentAngle; + } +} diff --git a/src/graph/Interfaces.hpp b/src/graph/Interfaces.hpp index c618ef8f2..97d3fd8fe 100644 --- a/src/graph/Interfaces.hpp +++ b/src/graph/Interfaces.hpp @@ -87,6 +87,8 @@ struct IPointsNode : virtual Node virtual std::size_t getPointCount() const { return getWidth() * getHeight(); } virtual Mat3x4f getLookAtOriginTransform() const { return Mat3x4f::identity(); } + virtual Vec3f getLinearVelocity() const { return Vec3f{0.0f}; } + virtual Vec3f getAngularVelocity() const { return Vec3f{0.0f}; } // Data getters virtual IAnyArray::ConstPtr getFieldData(rgl_field_t field) = 0; @@ -120,6 +122,8 @@ struct IPointsNodeSingleInput : IPointsNode virtual size_t getHeight() const override { return input->getHeight(); } virtual Mat3x4f getLookAtOriginTransform() const override { return input->getLookAtOriginTransform(); } + virtual Vec3f getLinearVelocity() const { return input->getLinearVelocity(); } + virtual Vec3f getAngularVelocity() const { return input->getAngularVelocity(); } // Data getters virtual bool hasField(rgl_field_t field) const { return input->hasField(field); } diff --git a/src/graph/NodesCore.hpp b/src/graph/NodesCore.hpp index 22e59c1fb..939def8b8 100644 --- a/src/graph/NodesCore.hpp +++ b/src/graph/NodesCore.hpp @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -30,6 +31,9 @@ #include #include #include +#include +#include +#include struct FormatPointsNode : IPointsNodeSingleInput @@ -112,6 +116,8 @@ struct RaytraceNode : IPointsNode size_t getHeight() const override { 
return 1; } // TODO: implement height in use_rays Mat3x4f getLookAtOriginTransform() const override { return raysNode->getCumulativeRayTransfrom().inverse(); } + Vec3f getLinearVelocity() const override { return sensorLinearVelocityXYZ; } + Vec3f getAngularVelocity() const override { return sensorAngularVelocityRPY; } // Data getters IAnyArray::ConstPtr getFieldData(rgl_field_t field) override @@ -122,10 +128,16 @@ struct RaytraceNode : IPointsNode return std::const_pointer_cast(fieldData.at(field)); } + IAnyArray::ConstPtr getFieldDataMultiReturn(rgl_field_t field, rgl_return_type_t); + // RaytraceNode specific void setVelocity(const Vec3f& linearVelocity, const Vec3f& angularVelocity); void enableRayDistortion(bool enabled) { doApplyDistortion = enabled; } void setNonHitDistanceValues(float nearDistance, float farDistance); + void setNonHitsMask(const int8_t* maskRaw, size_t maskPointCount); + void setBeamDivergence(float hDivergenceRad, float vDivergenceRad) { + hBeamHalfDivergenceRad = hDivergenceRad / 2.0f; + vBeamHalfDivergenceRad = vDivergenceRad / 2.0f;} private: IRaysNode::Ptr raysNode; @@ -137,12 +149,48 @@ struct RaytraceNode : IPointsNode float nearNonHitDistance{std::numeric_limits::infinity()}; float farNonHitDistance{std::numeric_limits::infinity()}; + float hBeamHalfDivergenceRad = 0.0f; + float vBeamHalfDivergenceRad = 0.0f; + + DeviceAsyncArray::Ptr rayMask; HostPinnedArray::Ptr requestCtxHst = HostPinnedArray::create(); DeviceAsyncArray::Ptr requestCtxDev = DeviceAsyncArray::create(arrayMgr); std::unordered_map fieldData; // All should be DeviceAsyncArray + struct MultiReturnFields + { + MultiReturnFields(StreamBoundObjectsManager& arrayMgr) + : isHit(DeviceAsyncArray::type>::create(arrayMgr)), + xyz(DeviceAsyncArray::type>::create(arrayMgr)), + distance(DeviceAsyncArray::type>::create(arrayMgr)) + {} + void resize(size_t size) + { + isHit->resize(size, false, false); + xyz->resize(size, false, false); + distance->resize(size, false, false); + } 
+ MultiReturnPointers getPointers() + { + return MultiReturnPointers{ + .isHit = isHit->getWritePtr(), + .xyz = xyz->getWritePtr(), + .distance = distance->getWritePtr(), + }; + } + DeviceAsyncArray::type>::Ptr isHit; + DeviceAsyncArray::type>::Ptr xyz; + DeviceAsyncArray::type>::Ptr distance; + }; + + + MultiReturnFields mrSamples = MultiReturnFields{arrayMgr}; + MultiReturnFields mrFirst = MultiReturnFields{arrayMgr}; + MultiReturnFields mrLast = MultiReturnFields{arrayMgr}; + + template auto getPtrTo(); @@ -150,6 +198,34 @@ struct RaytraceNode : IPointsNode void setFields(const std::set& fields); }; +struct MultiReturnSwitchNode : IPointsNodeSingleInput +{ + using Ptr = std::shared_ptr; + void setParameters(rgl_return_type_t returnType) { this->returnType = returnType; } + + // Node + void validateImpl() override + { + IPointsNodeSingleInput::validateImpl(); + rtxInput = getExactlyOneInputOfType(); + } + + void enqueueExecImpl() override {} + + // Data getters + IAnyArray::ConstPtr getFieldData(rgl_field_t field) override + { + if (returnType == RGL_RETURN_TYPE_NOT_DIVERGENT) { + return rtxInput->getFieldData(field); + } + return rtxInput->getFieldDataMultiReturn(field, returnType); + } + +private: + rgl_return_type_t returnType; + RaytraceNode::Ptr rtxInput; +}; + struct TransformPointsNode : IPointsNodeSingleInput { using Ptr = std::shared_ptr; @@ -493,11 +569,14 @@ struct RadarPostprocessPointsNode : IPointsNodeSingleInput // Data getters IAnyArray::ConstPtr getFieldData(rgl_field_t field) override; + const std::vector& getClusterAabbs() const { return clusterAabbs; } + private: // Data containers std::vector::type> filteredIndicesHost; DeviceAsyncArray::type>::Ptr filteredIndices = DeviceAsyncArray::type>::create( arrayMgr); + HostPinnedArray::type>::Ptr xyzInputHost = HostPinnedArray::type>::create(); HostPinnedArray::type>::Ptr distanceInputHost = HostPinnedArray::type>::create(); HostPinnedArray::type>::Ptr azimuthInputHost = 
HostPinnedArray::type>::create(); HostPinnedArray::type>::Ptr radialSpeedInputHost = @@ -527,6 +606,7 @@ struct RadarPostprocessPointsNode : IPointsNodeSingleInput float receivedNoiseStDevDb; std::vector radarScopes; + std::vector clusterAabbs; std::random_device randomDevice; @@ -555,6 +635,121 @@ struct RadarPostprocessPointsNode : IPointsNodeSingleInput }; }; +struct RadarTrackObjectsNode : IPointsNodeSingleInput +{ + using Ptr = std::shared_ptr; + + enum class ObjectStatus : uint8_t + { + Measured = 0, + New = 1, + Predicted = 2, + Invalid = 255 + }; + + enum class MovementStatus : uint8_t + { + Moved = 0, + Stationary = 1, + Invalid = 255 + }; + + struct ObjectBounds + { + Field::type mostCommonEntityId = RGL_ENTITY_INVALID_ID; + Vec3f position{0}; + Aabb3Df aabb{}; + }; + + struct ClassificationProbabilities + { + float existence{100.0f}; + uint8_t classCar{0}; + uint8_t classTruck{0}; + uint8_t classMotorcycle{0}; + uint8_t classBicycle{0}; + uint8_t classPedestrian{0}; + uint8_t classAnimal{0}; + uint8_t classHazard{0}; + uint8_t classUnknown{0}; + }; + + struct ObjectState + { + uint32_t id{0}; + uint32_t creationTime{0}; + uint32_t lastMeasuredTime{0}; + ObjectStatus objectStatus{ObjectStatus::Invalid}; + MovementStatus movementStatus{MovementStatus::Invalid}; + ClassificationProbabilities classificationProbabilities{}; + + RunningStats position{}; + RunningStats orientation{}; + RunningStats absVelocity{}; + RunningStats relVelocity{}; + RunningStats absAccel{}; + RunningStats relAccel{}; + RunningStats orientationRate{}; + RunningStats length{}; + RunningStats width{}; + }; + + RadarTrackObjectsNode(); + + void setParameters(float distanceThreshold, float azimuthThreshold, float elevationThreshold, float radialSpeedThreshold, + float maxMatchingDistance, float maxPredictionTimeFrame, float movementSensitivity); + + void setObjectClasses(const int32_t* entityIds, const rgl_radar_object_class_t* objectClasses, int32_t count); + + void enqueueExecImpl() 
override; + + bool hasField(rgl_field_t field) const override { return fieldData.contains(field); } + + std::vector getRequiredFieldList() const override; + + // Data getters + IAnyArray::ConstPtr getFieldData(rgl_field_t field) override { return fieldData.at(field); } + size_t getWidth() const override { return fieldData.empty() ? 0 : fieldData.begin()->second->getCount(); } + size_t getHeight() const override { return 1; } // In fact, this will be only a 1-dimensional array. + + const std::list& getObjectStates() const { return objectStates; } + +private: + Vec3f predictObjectPosition(const ObjectState& objectState, double deltaTimeMs) const; + void parseEntityIdToClassProbability(Field::type entityId, ClassificationProbabilities& probabilities); + void createObjectState(const ObjectBounds& objectBounds, double currentTimeMs); + void updateObjectState(ObjectState& objectState, const Vec3f& updatedPosition, const Aabb3Df& updatedAabb, + ObjectStatus objectStatus, double currentTimeMs, double deltaTimeMs); + void updateOutputData(); + + std::list objectStates; + std::unordered_map::type, rgl_radar_object_class_t> entityIdsToClasses; + std::unordered_map fieldData; + + uint32_t objectIDCounter = 0; // Not static - I assume each ObjectTrackingNode is like a separate radar. + std::queue objectIDPoll; + + float distanceThreshold; + float azimuthThreshold; + float elevationThreshold; + float radialSpeedThreshold; + + float maxMatchingDistance = + 1.0f; // Max distance between predicted and newly detected position to match objects between frames. + float maxPredictionTimeFrame = + 500.0f; // Maximum time in milliseconds that can pass between two detections of the same object. + // In other words, how long object state can be predicted until it will be declared lost. + float movementSensitivity = 0.01f; // Max position change for an object to be qualified as MovementStatus::Stationary. 
+ + HostPinnedArray::type>::Ptr xyzHostPtr = HostPinnedArray::type>::create(); + HostPinnedArray::type>::Ptr distanceHostPtr = HostPinnedArray::type>::create(); + HostPinnedArray::type>::Ptr azimuthHostPtr = HostPinnedArray::type>::create(); + HostPinnedArray::type>::Ptr elevationHostPtr = HostPinnedArray::type>::create(); + HostPinnedArray::type>::Ptr radialSpeedHostPtr = + HostPinnedArray::type>::create(); + HostPinnedArray::type>::Ptr entityIdHostPtr = HostPinnedArray::type>::create(); +}; + struct FilterGroundPointsNode : IPointsNodeSingleInput { using Ptr = std::shared_ptr; diff --git a/src/graph/RadarPostprocessPointsNode.cpp b/src/graph/RadarPostprocessPointsNode.cpp index d39cf64be..252f1c493 100644 --- a/src/graph/RadarPostprocessPointsNode.cpp +++ b/src/graph/RadarPostprocessPointsNode.cpp @@ -78,6 +78,7 @@ void RadarPostprocessPointsNode::enqueueExecImpl() return; } + xyzInputHost->copyFrom(input->getFieldData(XYZ_VEC3_F32)); distanceInputHost->copyFrom(input->getFieldData(DISTANCE_F32)); azimuthInputHost->copyFrom(input->getFieldData(AZIMUTH_F32)); radialSpeedInputHost->copyFrom(input->getFieldData(RADIAL_SPEED_F32)); @@ -143,13 +144,19 @@ void RadarPostprocessPointsNode::enqueueExecImpl() clusterPowerHost->resize(filteredIndicesHost.size(), false, false); clusterNoiseHost->resize(filteredIndicesHost.size(), false, false); clusterSnrHost->resize(filteredIndicesHost.size(), false, false); + std::normal_distribution gaussianNoise(receivedNoiseMeanDb, receivedNoiseStDevDb); + clusterAabbs.resize(clusters.size()); + for (int clusterIdx = 0; clusterIdx < clusters.size(); ++clusterIdx) { std::complex AU = 0; std::complex AR = 0; - auto&& cluster = clusters[clusterIdx]; + auto& cluster = clusters[clusterIdx]; + auto& clusterAabb = clusterAabbs[clusterIdx]; + clusterAabb.reset(); for (const auto pointInCluster : cluster.indices) { + clusterAabb.expand(xyzInputHost->at(pointInCluster)); std::complex BU = {outBUBRFactorHost->at(pointInCluster)[0].real(), 
outBUBRFactorHost->at(pointInCluster)[0].imag()}; std::complex BR = {outBUBRFactorHost->at(pointInCluster)[1].real(), @@ -271,8 +278,20 @@ void RadarPostprocessPointsNode::RadarCluster::addPoint(Field::type minMaxDistance[1] = std::max(minMaxDistance[1], distance); minMaxAzimuth[0] = std::min(minMaxAzimuth[0], azimuth); minMaxAzimuth[1] = std::max(minMaxAzimuth[1], azimuth); - minMaxRadialSpeed[0] = std::min(minMaxRadialSpeed[0], radialSpeed); - minMaxRadialSpeed[1] = std::max(minMaxRadialSpeed[1], radialSpeed); + + // There are three reasonable cases that should be handled as intended: + // - limit and radialSpeed are nans - limit will be set to nan (from radialSpeed) + // - limit is nan and radialSpeed if a number - limit will be set to a number (from radialSpeed) + // - limit and radialSpeed are numbers - std::min will be called. + // There can technically be a fourth case: + // - limit is a number and radialSpeed is nan - this should technically be handled via order of arguments in std::min + // (assumption that comparison inside will return false); what is more important, such candidate should be eliminated + // with isCandidate method; + // The fourth case should not occur - radial speed is either nan for all entities (first raycast, with delta time = 0) or + // has value for all entities. + minMaxRadialSpeed[0] = std::isnan(minMaxRadialSpeed[0]) ? radialSpeed : std::min(minMaxRadialSpeed[0], radialSpeed); + minMaxRadialSpeed[1] = std::isnan(minMaxRadialSpeed[1]) ? 
radialSpeed : std::max(minMaxRadialSpeed[1], radialSpeed); + minMaxElevation[0] = std::min(minMaxElevation[0], elevation); minMaxElevation[1] = std::max(minMaxElevation[1], elevation); } @@ -284,12 +303,22 @@ inline bool RadarPostprocessPointsNode::RadarCluster::isCandidate(float distance const auto isWithinDistanceUpperBound = distance - radarScope.distance_separation_threshold <= minMaxDistance[1]; const auto isWithinAzimuthLowerBound = azimuth + radarScope.azimuth_separation_threshold >= minMaxAzimuth[0]; const auto isWithinAzimuthUpperBound = azimuth - radarScope.azimuth_separation_threshold <= minMaxAzimuth[1]; + const auto isWithinRadialSpeedLowerBound = radialSpeed + radarScope.radial_speed_separation_threshold >= minMaxRadialSpeed[0]; const auto isWithinRadialSpeedUpperBound = radialSpeed - radarScope.radial_speed_separation_threshold <= minMaxRadialSpeed[1]; + + // Radial speed may be nan if this is the first raytracing call (delta time equals 0). When cluster has nan radial speed + // limits (technically just do not have radial speed information), the goal below is to ignore radial speed checkout. The + // next goal is to allow adding points to cluster, if these points have non nan radial speed - to eliminate undefined + // radial speed information from cluster. This should also work well, when limits are fine and candidate's radial speed + // is nan - then radial speed checkouts will give false and the candidate will not pass. 
+ // Context for radial speed checkouts below - this can be interpreted as "true if any radial speed limit is nan or + // candidate's radial speed is within limits" return isWithinDistanceLowerBound && isWithinDistanceUpperBound && isWithinAzimuthLowerBound && isWithinAzimuthUpperBound && - isWithinRadialSpeedLowerBound && isWithinRadialSpeedUpperBound; + (std::isunordered(minMaxRadialSpeed[0], minMaxRadialSpeed[1]) || + (isWithinRadialSpeedLowerBound && isWithinRadialSpeedUpperBound)); } inline bool RadarPostprocessPointsNode::RadarCluster::canMergeWith(const RadarCluster& other, @@ -323,7 +352,10 @@ inline bool RadarPostprocessPointsNode::RadarCluster::canMergeWith(const RadarCl areRangesWithinThreshold(minMaxRadialSpeed, other.minMaxRadialSpeed, radarScope->radial_speed_separation_threshold); - return isDistanceGood && isAzimuthGood && isRadialSpeedGood; + // Radial speed check is ignored if one of limits on it, in one of clusters, is nan. + return isDistanceGood && isAzimuthGood && + (isRadialSpeedGood || std::isunordered(minMaxRadialSpeed[0], minMaxRadialSpeed[1]) || + std::isunordered(other.minMaxRadialSpeed[0], other.minMaxRadialSpeed[1])); } void RadarPostprocessPointsNode::RadarCluster::takeIndicesFrom(RadarCluster&& other) @@ -332,8 +364,8 @@ void RadarPostprocessPointsNode::RadarCluster::takeIndicesFrom(RadarCluster&& ot minMaxDistance[1] = std::max(minMaxDistance[1], other.minMaxDistance[1]); minMaxAzimuth[0] = std::min(minMaxAzimuth[0], other.minMaxAzimuth[0]); minMaxAzimuth[1] = std::max(minMaxAzimuth[1], other.minMaxAzimuth[1]); - minMaxRadialSpeed[0] = std::min(minMaxRadialSpeed[0], other.minMaxRadialSpeed[0]); - minMaxRadialSpeed[1] = std::max(minMaxRadialSpeed[1], other.minMaxRadialSpeed[1]); + minMaxRadialSpeed[0] = std::isnan(other.minMaxRadialSpeed[0]) ? minMaxRadialSpeed[0] : std::min(other.minMaxRadialSpeed[0], minMaxRadialSpeed[0]); + minMaxRadialSpeed[1] = std::isnan(other.minMaxRadialSpeed[1]) ? 
minMaxRadialSpeed[1] : std::min(other.minMaxRadialSpeed[1], minMaxRadialSpeed[1]); minMaxElevation[0] = std::min(minMaxElevation[0], other.minMaxElevation[0]); minMaxElevation[1] = std::max(minMaxElevation[1], other.minMaxElevation[1]); diff --git a/src/graph/RadarTrackObjectsNode.cpp b/src/graph/RadarTrackObjectsNode.cpp new file mode 100644 index 000000000..9ea8980b4 --- /dev/null +++ b/src/graph/RadarTrackObjectsNode.cpp @@ -0,0 +1,326 @@ +// Copyright 2024 Robotec.AI +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include + + +// TODO(Pawel): Consider adding more output fields here, maybe usable for ROS 2 message or visualization. Consider also returning detections, when object states are returned through public method. 
+RadarTrackObjectsNode::RadarTrackObjectsNode() +{ + fieldData.emplace(XYZ_VEC3_F32, createArray(XYZ_VEC3_F32)); + fieldData.emplace(ENTITY_ID_I32, createArray(ENTITY_ID_I32)); +} + +void RadarTrackObjectsNode::setParameters(float distanceThreshold, float azimuthThreshold, float elevationThreshold, + float radialSpeedThreshold, float maxMatchingDistance, float maxPredictionTimeFrame, + float movementSensitivity) +{ + this->distanceThreshold = distanceThreshold; + this->azimuthThreshold = azimuthThreshold; + this->elevationThreshold = elevationThreshold; + this->radialSpeedThreshold = radialSpeedThreshold; + + this->maxMatchingDistance = maxMatchingDistance; + this->maxPredictionTimeFrame = maxPredictionTimeFrame; + this->movementSensitivity = movementSensitivity; +} + +void RadarTrackObjectsNode::setObjectClasses(const int32_t* entityIds, const rgl_radar_object_class_t* objectClasses, + int32_t count) +{ + entityIdsToClasses.clear(); + for (int i = 0; i < count; ++i) { + entityIdsToClasses[static_cast::type>(entityIds[i])] = objectClasses[i]; + } +} + +void RadarTrackObjectsNode::enqueueExecImpl() +{ + xyzHostPtr->copyFrom(input->getFieldData(XYZ_VEC3_F32)); + distanceHostPtr->copyFrom(input->getFieldData(DISTANCE_F32)); + azimuthHostPtr->copyFrom(input->getFieldData(AZIMUTH_F32)); + elevationHostPtr->copyFrom(input->getFieldData(ELEVATION_F32)); + radialSpeedHostPtr->copyFrom(input->getFieldData(RADIAL_SPEED_F32)); + entityIdHostPtr->copyFrom(input->getFieldData(ENTITY_ID_I32)); + + // TODO(Pawel): Reconsider approach below. + // At this moment, I would like to check input this way, because it will keep RadarTrackObjectsNode testable without + // an input being RadarPostprocessPointsNode. If I have nullptr here, I simply do not process bounding boxes for detections. + auto radarPostprocessPointsNode = std::dynamic_pointer_cast(input); + const auto detectionAabbs = radarPostprocessPointsNode ? 
radarPostprocessPointsNode->getClusterAabbs() : + std::vector(input->getPointCount()); + + // Top level in this list is for objects. Bottom level is for detections that belong to specific objects. Below is an initialization of a helper + // structure for region growing, which starts with a while-loop. + std::list> objectIndices; + for (int i = 0; i < input->getPointCount(); ++i) { + objectIndices.emplace_back(1, i); + } + + int leftIndex = 0; + while (leftIndex + 1 < objectIndices.size()) { + auto ownIt = objectIndices.begin(); + std::advance(ownIt, leftIndex); + auto checkedIt = std::next(ownIt); + bool reorganizationTookPlace = false; + + while (checkedIt != objectIndices.end()) { + auto& checkedIndices = *checkedIt; + + // TODO(Pawel): In theory, I do not have to check the same indices multiple times when growing this subset in next iterations. + for (auto ownDetectionId : *ownIt) { + const auto isPartOfSameObject = [&, distance = distanceHostPtr->at(ownDetectionId), + azimuth = azimuthHostPtr->at(ownDetectionId), + elevation = elevationHostPtr->at(ownDetectionId), + radialSpeed = radialSpeedHostPtr->at(ownDetectionId)](int checkedDetectionId) { + return std::abs(distanceHostPtr->at(checkedDetectionId) - distance) <= distanceThreshold && + std::abs(azimuthHostPtr->at(checkedDetectionId) - azimuth) <= azimuthThreshold && + std::abs(elevationHostPtr->at(checkedDetectionId) - elevation) <= elevationThreshold && + (std::isunordered(radialSpeed, radialSpeedHostPtr->at(checkedDetectionId)) || + std::abs(radialSpeedHostPtr->at(checkedDetectionId) - radialSpeed) <= radialSpeedThreshold); + }; + + if (std::any_of(checkedIndices.cbegin(), checkedIndices.cend(), isPartOfSameObject)) { + ownIt->splice(ownIt->cend(), checkedIndices); + break; + } + } + if (checkedIndices.empty()) { + checkedIt = objectIndices.erase(checkedIt); + reorganizationTookPlace = true; + } else { + ++checkedIt; + } + } + + if (!reorganizationTookPlace) { + // We are done with growing this object, no 
more matching detections are available. + ++leftIndex; + } + } + + // Calculate positions of objects detected in current frame. + std::list newObjectBounds; + for (const auto& separateObjectIndices : objectIndices) { + auto& objectBounds = newObjectBounds.emplace_back(); + auto entityIdHist = std::unordered_map::type, int>(); + for (const auto detectionIndex : separateObjectIndices) { + ++entityIdHist[entityIdHostPtr->at(detectionIndex)]; + objectBounds.position += xyzHostPtr->at(detectionIndex); + objectBounds.aabb += detectionAabbs[detectionIndex]; + } + // Most common detection entity id is assigned as object id. + int maxIdCount = -1; + for (const auto& [entityId, count] : entityIdHist) { + if (count > maxIdCount) { + objectBounds.mostCommonEntityId = entityId; + maxIdCount = count; + } + } + objectBounds.position *= 1 / static_cast(separateObjectIndices.size()); + } + + const auto currentTime = Scene::instance().getTime().value_or(Time::zero()).asMilliseconds(); + const auto deltaTime = currentTime - Scene::instance().getPrevTime().value_or(Time::zero()).asMilliseconds(); + + // Check object list from previous frame and try to find matches with newly detected objects. + // Later, for newly detected objects without match, create new object state. + for (auto objectStateIt = objectStates.begin(); objectStateIt != objectStates.end();) { + auto& objectState = *objectStateIt; + const auto predictedPosition = predictObjectPosition(objectState, deltaTime); + const auto closestObjectIt = std::min_element( + newObjectBounds.cbegin(), newObjectBounds.cend(), [&](const auto& a, const auto& b) { + return (a.position - predictedPosition).lengthSquared() < (b.position - predictedPosition).lengthSquared(); + }); + + // There is a newly detected object (current frame) that matches the predicted position of one of objects from previous frame. + // Update object from previous frame to newly detected object position and remove this positions for next checkouts. 
+ if (const auto& closestObject = *closestObjectIt; + (predictedPosition - closestObject.position).length() < maxMatchingDistance) { + updateObjectState(objectState, closestObject.position, closestObject.aabb, ObjectStatus::Measured, currentTime, + deltaTime); + newObjectBounds.erase(closestObjectIt); + ++objectStateIt; + continue; + } + + // There is no match for objectState in newly detected object positions. If that object was already detected in previous frame + // (new, measured or predicted, does not matter) and its last measurement time was within maxPredictionTimeFrame, then its + // position (and state) in current frame is predicted. + if (objectState.lastMeasuredTime >= currentTime - maxPredictionTimeFrame) { + updateObjectState(objectState, predictedPosition, {}, ObjectStatus::Predicted, currentTime, deltaTime); + ++objectStateIt; + continue; + } + + // objectState do not have a match in newly detected object positions and it was also on ObjectStatus::Predicted last frame - + // not this object is considered lost and is removed from object list. Also remove its ID to poll. + objectIDPoll.push(objectState.id); + objectStateIt = objectStates.erase(objectStateIt); + } + + // All newly detected object position that do not have a match in previous frame - create new object state. + for (const auto& newObject : newObjectBounds) { + createObjectState(newObject, currentTime); + } + + updateOutputData(); +} + +std::vector RadarTrackObjectsNode::getRequiredFieldList() const +{ + return {XYZ_VEC3_F32, DISTANCE_F32, AZIMUTH_F32, ELEVATION_F32, RADIAL_SPEED_F32, ENTITY_ID_I32}; +} + +Vec3f RadarTrackObjectsNode::predictObjectPosition(const ObjectState& objectState, double deltaTimeMs) const +{ + assert(objectState.position.getSamplesCount() > 0); + assert(deltaTimeMs <= std::numeric_limits::max()); + const auto deltaTimeSec = 1e-3f * static_cast(deltaTimeMs); + const auto assumedVelocity = objectState.absAccel.getSamplesCount() > 0 ? 
+ (objectState.absVelocity.getLastSample() + + deltaTimeSec * objectState.absAccel.getLastSample()) : + objectState.absVelocity.getLastSample(); + const auto predictedMovement = deltaTimeSec * assumedVelocity; + return objectState.position.getLastSample() + Vec3f{predictedMovement.x(), predictedMovement.y(), 0.0f}; +} + +void RadarTrackObjectsNode::parseEntityIdToClassProbability(Field::type entityId, + ClassificationProbabilities& probabilities) +{ + // May be updated, if entities will be able to belong to multiple classes. + constexpr auto maxClassificationProbability = std::numeric_limits::max(); + + const auto it = entityIdsToClasses.find(entityId); + if (it == entityIdsToClasses.cend()) { + probabilities.classUnknown = maxClassificationProbability; + return; + } + + // Single probability is set, but not probabilities are zeroed - the logic here is that probabilities are only set on + // object creation, so initially all probabilities are zero. + switch (it->second) { + case RGL_RADAR_CLASS_CAR: probabilities.classCar = maxClassificationProbability; break; + case RGL_RADAR_CLASS_TRUCK: probabilities.classTruck = maxClassificationProbability; break; + case RGL_RADAR_CLASS_MOTORCYCLE: probabilities.classMotorcycle = maxClassificationProbability; break; + case RGL_RADAR_CLASS_BICYCLE: probabilities.classBicycle = maxClassificationProbability; break; + case RGL_RADAR_CLASS_PEDESTRIAN: probabilities.classPedestrian = maxClassificationProbability; break; + case RGL_RADAR_CLASS_ANIMAL: probabilities.classAnimal = maxClassificationProbability; break; + case RGL_RADAR_CLASS_HAZARD: probabilities.classHazard = maxClassificationProbability; break; + default: probabilities.classUnknown = maxClassificationProbability; + } +} + +void RadarTrackObjectsNode::createObjectState(const ObjectBounds& objectBounds, double currentTimeMs) +{ + auto& objectState = objectStates.emplace_back(); + if (objectIDPoll.empty()) { + // Overflow is technically acceptable below, but for > 
4,294,967,295 objects it may cause having repeated IDs. + objectState.id = objectIDCounter++; + } else { + objectState.id = objectIDPoll.front(); + objectIDPoll.pop(); + } + + assert(currentTimeMs <= std::numeric_limits::max()); + objectState.creationTime = static_cast(currentTimeMs); + objectState.lastMeasuredTime = objectState.creationTime; + objectState.objectStatus = ObjectStatus::New; + + // TODO(Pawel): Consider object radial speed (from detections) as a way to decide here. + objectState.movementStatus = MovementStatus::Invalid; // No good way to determine it. + parseEntityIdToClassProbability(objectBounds.mostCommonEntityId, objectState.classificationProbabilities); + + // I do not add velocity or acceleration 0.0f samples because this would affect mean and std dev calculation. However, the + // number of samples between their stats and position will not match. + objectState.position.addSample(objectBounds.position); + + // TODO(Pawel): Consider updating this later. One option would be to take rotated bounding box, and calculate orientation as + // the vector perpendicular to its shorter edge. Then, width would be that defined as that exact edge. The other edge would + // be taken as length. + // At this moment I just assume that object length is alongside X axis, and its width is alongside Y axis. Note also that + // length and width does not have to be correlated to object orientation. 
+ objectState.length.addSample(objectBounds.aabb.maxCorner().x() - objectBounds.aabb.minCorner().x()); + objectState.width.addSample(objectBounds.aabb.maxCorner().y() - objectBounds.aabb.minCorner().y()); +} + +void RadarTrackObjectsNode::updateObjectState(ObjectState& objectState, const Vec3f& updatedPosition, + const Aabb3Df& updatedAabb, ObjectStatus objectStatus, double currentTimeMs, + double deltaTimeMs) +{ + assert(deltaTimeMs > 0 && deltaTimeMs <= std::numeric_limits::max()); + const auto displacement = updatedPosition - objectState.position.getLastSample(); + const auto deltaTimeSecInv = 1e3f / static_cast(deltaTimeMs); + const auto absVelocity = Vec2f{displacement.x() * deltaTimeSecInv, displacement.y() * deltaTimeSecInv}; + + const auto radarVelocity = input->getLinearVelocity(); + const auto relVelocity = absVelocity - Vec2f{radarVelocity.x(), radarVelocity.y()}; + + if (objectStatus == ObjectStatus::Measured) { + assert(currentTimeMs <= std::numeric_limits::max()); + objectState.lastMeasuredTime = static_cast(currentTimeMs); + } + objectState.objectStatus = objectStatus; + objectState.movementStatus = displacement.length() > movementSensitivity ? MovementStatus::Moved : + MovementStatus::Stationary; + objectState.position.addSample(updatedPosition); + + // There has to be at leas one abs velocity sample from previous frames - in other words, this has to be the third frame to be + // able to calculate acceleration (first frame - position, second frame - velocity, third frame - acceleration). 
+ if (objectState.absVelocity.getSamplesCount() > 0) { + const auto absAccel = (absVelocity - objectState.absVelocity.getLastSample()) * deltaTimeSecInv; + objectState.absAccel.addSample(absAccel); + + const auto relAccel = (relVelocity - objectState.relVelocity.getLastSample()) * deltaTimeSecInv; + objectState.relAccel.addSample(relAccel); + } + objectState.absVelocity.addSample(absVelocity); + objectState.relVelocity.addSample(relVelocity); + + // Behaves similar to acceleration. In order to calculate orientation I need velocity, which can be calculated starting from + // the second frame. For this reason, the third frame is the first one when I am able to calculate orientation rate. Additionally, + // if object does not move, keep its previous orientation. + const auto orientation = objectState.movementStatus == MovementStatus::Moved ? atan2(absVelocity.y(), absVelocity.x()) : + objectState.orientation.getLastSample(); + if (objectState.orientation.getSamplesCount() > 0) { + objectState.orientationRate.addSample(orientation - objectState.orientation.getLastSample()); + } + objectState.orientation.addSample(orientation); + + if (objectStatus == ObjectStatus::Measured) { + objectState.length.addSample(updatedAabb.maxCorner().x() - updatedAabb.minCorner().x()); + objectState.width.addSample(updatedAabb.maxCorner().y() - updatedAabb.minCorner().y()); + } else { + objectState.length.addSample(objectState.length.getLastSample()); + objectState.width.addSample(objectState.width.getLastSample()); + } +} + +void RadarTrackObjectsNode::updateOutputData() +{ + fieldData[XYZ_VEC3_F32]->resize(objectStates.size(), false, false); + auto* xyzPtr = static_cast::type*>(fieldData[XYZ_VEC3_F32]->getRawWritePtr()); + + fieldData[ENTITY_ID_I32]->resize(objectStates.size(), false, false); + auto* idPtr = static_cast::type*>(fieldData[ENTITY_ID_I32]->getRawWritePtr()); + + int objectIndex = 0; + for (const auto& objectState : objectStates) { + assert(objectState.id <= 
std::numeric_limits::type>::max()); + xyzPtr[objectIndex] = objectState.position.getLastSample(); + idPtr[objectIndex] = objectState.id; + ++objectIndex; + } +} diff --git a/src/graph/RaytraceNode.cpp b/src/graph/RaytraceNode.cpp index e07a2e616..d2b2df2bb 100644 --- a/src/graph/RaytraceNode.cpp +++ b/src/graph/RaytraceNode.cpp @@ -47,6 +47,11 @@ void RaytraceNode::validateImpl() auto msg = fmt::format("requested for raytrace with velocity distortion, but RaytraceNode cannot get time offsets"); throw InvalidPipeline(msg); } + if (rayMask != nullptr) { + if (rayMask->getCount() != raysNode->getRayCount()) { + throw InvalidPipeline("Mask size does not match the number of rays"); + } + } } template @@ -65,6 +70,11 @@ void RaytraceNode::enqueueExecImpl() data->resize(raysNode->getRayCount(), false, false); } + // TODO(prybicki): don't update this when not needed + mrSamples.resize(MULTI_RETURN_BEAM_SAMPLES * raysNode->getRayCount()); + mrFirst.resize(raysNode->getRayCount()); + mrLast.resize(raysNode->getRayCount()); + // Even though we are in graph thread here, we can access Scene class (see comment there) const Mat3x4f* raysPtr = raysNode->getRays()->asSubclass()->getReadPtr(); auto sceneAS = Scene::instance().getASLocked(); @@ -84,7 +94,7 @@ void RaytraceNode::enqueueExecImpl() .doApplyDistortion = doApplyDistortion, .nearNonHitDistance = nearNonHitDistance, .farNonHitDistance = farNonHitDistance, - .rays = raysPtr, + .raysWorld = raysPtr, .rayCount = raysNode->getRayCount(), .rayOriginToWorld = raysNode->getCumulativeRayTransfrom(), .rayRanges = rayRanges.has_value() ? (*rayRanges)->asSubclass()->getReadPtr() : @@ -92,8 +102,9 @@ void RaytraceNode::enqueueExecImpl() .rayRangesCount = rayRanges.has_value() ? (*rayRanges)->getCount() : defaultRange->getCount(), .ringIds = ringIds.has_value() ? (*ringIds)->asSubclass()->getReadPtr() : nullptr, .ringIdsCount = ringIds.has_value() ? (*ringIds)->getCount() : 0, - .rayTimeOffsets = timeOffsets.has_value() ? 
(*timeOffsets)->asSubclass()->getReadPtr() : nullptr, + .rayTimeOffsetsMs = timeOffsets.has_value() ? (*timeOffsets)->asSubclass()->getReadPtr() : nullptr, .rayTimeOffsetsCount = timeOffsets.has_value() ? (*timeOffsets)->getCount() : 0, + .rayMask = (rayMask != nullptr) ? rayMask->getReadPtr() : nullptr, .scene = sceneAS, .sceneTime = Scene::instance().getTime().value_or(Time::zero()).asSeconds(), .sceneDeltaTime = static_cast(Scene::instance().getDeltaTime().value_or(Time::zero()).asSeconds()), @@ -112,6 +123,9 @@ void RaytraceNode::enqueueExecImpl() .elevation = getPtrTo(), .normal = getPtrTo(), .incidentAngle = getPtrTo(), + .hBeamHalfDivergenceRad = hBeamHalfDivergenceRad, + .vBeamHalfDivergenceRad = vBeamHalfDivergenceRad, + .mrSamples = mrSamples.getPointers(), }; requestCtxDev->copyFrom(requestCtxHst); @@ -119,6 +133,32 @@ void RaytraceNode::enqueueExecImpl() std::size_t pipelineArgsSize = requestCtxDev->getSizeOf() * requestCtxDev->getCount(); CHECK_OPTIX(optixLaunch(Optix::getOrCreate().pipeline, getStreamHandle(), pipelineArgsPtr, pipelineArgsSize, &sceneSBT, launchDims.x, launchDims.y, launchDims.y)); + + if (hBeamHalfDivergenceRad > 0.0f && vBeamHalfDivergenceRad > 0.0f) { + gpuProcessBeamSamplesFirstLast(getStreamHandle(), raysNode->getRayCount(), MULTI_RETURN_BEAM_SAMPLES, + mrSamples.getPointers(), mrFirst.getPointers(), mrLast.getPointers(), raysPtr); + } +} + +IAnyArray::ConstPtr RaytraceNode::getFieldDataMultiReturn(rgl_field_t field, rgl_return_type_t type) +{ + if (type == RGL_RETURN_TYPE_FIRST) { + switch (field) { + case XYZ_VEC3_F32: return mrFirst.xyz; + case DISTANCE_F32: return mrFirst.distance; + case IS_HIT_I32: return mrFirst.isHit; + default: return getFieldData(field); + } + } + if (type == RGL_RETURN_TYPE_LAST) { + switch (field) { + case XYZ_VEC3_F32: return mrLast.xyz; + case DISTANCE_F32: return mrLast.distance; + case IS_HIT_I32: return mrLast.isHit; + default: return getFieldData(field); + } + } + throw 
InvalidPipeline(fmt::format("Unknown multi-return type ({})", type)); } void RaytraceNode::setFields(const std::set& fields) @@ -177,3 +217,8 @@ void RaytraceNode::setNonHitDistanceValues(float nearDistance, float farDistance nearNonHitDistance = nearDistance; farNonHitDistance = farDistance; } +void RaytraceNode::setNonHitsMask(const int8_t* maskRaw, size_t maskPointCount) +{ + rayMask = DeviceAsyncArray::create(arrayMgr); + rayMask->copyFromExternal(maskRaw, maskPointCount); +} diff --git a/src/math/Aabb.h b/src/math/Aabb.h new file mode 100644 index 000000000..4b1599358 --- /dev/null +++ b/src/math/Aabb.h @@ -0,0 +1,111 @@ +// Copyright 2024 Robotec.AI +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "Vector.hpp" + +/** + * Axis-aligned bounding box, with dynamic dimension (> 0) and for arithmetic types. You can create an empty aabb or initialize + * it with center and size. You modify aabb: + * - by extending it with a new point or + * - by merging it with another aabb. + * Additionally, + and += operators are overloaded to provide convenient alternative for these operations. 
+ */ +template 0) && std::is_arithmetic_v>> +class Aabb +{ +public: + Aabb() = default; + Aabb(const Vector& center, const Vector& size) : minValue{center - size / 2}, maxValue{center + size / 2} + {} + + Aabb(const Aabb& other) = default; + Aabb(Aabb&& other) = default; + + Aabb& operator=(const Aabb& other) = default; + Aabb& operator=(Aabb&& other) = default; + + Aabb operator+(const Vector& rhs) const + { + Aabb result = *this; + return result += rhs; + } + + Aabb operator+(const Aabb& rhs) const + { + Aabb result = *this; + return result += rhs; + } + + Aabb& operator+=(const Vector& rhs) + { + expand(rhs); + return *this; + } + + Aabb& operator+=(const Aabb& rhs) + { + merge(rhs); + return *this; + } + + const Vector& minCorner() const { return minValue; } + const Vector& maxCorner() const { return maxValue; } + + void expand(const Vector& point) + { + for (int i = 0; i < dim; ++i) { + minValue[i] = std::min(minValue[i], point[i]); + maxValue[i] = std::max(maxValue[i], point[i]); + } + } + + void merge(const Aabb& other) + { + expand(other.minValue); + expand(other.maxValue); + } + + void reset() + { + minValue = maxValue = {0}; + } + +private: + Vector minValue{0}; + Vector maxValue{0}; +}; + +using Aabb2Df = Aabb<2, float>; +using Aabb3Df = Aabb<3, float>; +using Aabb4Df = Aabb<4, float>; + +using Aabb2Di = Aabb<2, int>; +using Aabb3Di = Aabb<3, int>; +using Aabb4Di = Aabb<4, int>; + +static_assert(std::is_trivially_copyable_v); +static_assert(std::is_trivially_copyable_v); +static_assert(std::is_trivially_copyable_v); +static_assert(std::is_standard_layout_v); +static_assert(std::is_standard_layout_v); +static_assert(std::is_standard_layout_v); + +static_assert(std::is_trivially_copyable_v); +static_assert(std::is_trivially_copyable_v); +static_assert(std::is_trivially_copyable_v); +static_assert(std::is_standard_layout_v); +static_assert(std::is_standard_layout_v); +static_assert(std::is_standard_layout_v); diff --git a/src/math/RunningStats.hpp 
b/src/math/RunningStats.hpp new file mode 100644 index 000000000..e9212788f --- /dev/null +++ b/src/math/RunningStats.hpp @@ -0,0 +1,170 @@ +// Copyright 2024 Robotec.AI +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "Vector.hpp" + +template class Base, class Checked> +struct is_specialization_of : std::false_type +{}; + +template class Base, int dim, typename T> +struct is_specialization_of> : std::true_type +{}; + +template +inline constexpr bool is_floating_point_vector_v = false; + +template class Base, int dim, typename T> +inline constexpr bool is_floating_point_vector_v> = std::is_floating_point_v && + is_specialization_of>::value; + +// Integral types here generally make no sense in terms of mean and std dev. +// Floating point types and any Vector specializations using floating point types are acceptable. 
+template || is_floating_point_vector_v>> +class RunningStats +{ +public: + using StatType = T; + + static constexpr RunningStats calculateFor(const std::vector& range) + { + RunningStats stats; + for (auto&& element : range) { + stats.addSample(element); + } + return stats; + } + + void addSample(StatType sample) + { + lastSample = std::move(sample); + ++counter; + + const auto delta = lastSample - mean; + mean += delta / counter; + m2 += delta * (lastSample - mean); + } + + size_t getSamplesCount() const { return counter; } + + StatType getLastSample() const { return lastSample; } + + StatType getMean() const { return mean; } + + StatType getVariance() const { return counter < 2 ? StatType{} : m2 / counter; } + + StatType getStdDev() const { return std::sqrt(getVariance()); } + + auto getMeanAndStdDev() const { return std::make_pair(getMean(), getStdDev()); } + +private: + StatType lastSample{0}; + size_t counter{0}; + + StatType mean{0}; + StatType m2{0}; +}; + +// RunningStats specialization targeting Vector types. This is important because of the details in addSample method and additional methods provided (covariance).) 
+template +class RunningStats> +{ +public: + using StatType = Vector; + + static constexpr RunningStats calculateFor(const std::vector& range) + { + RunningStats stats; + for (auto&& element : range) { + stats.addSample(element); + } + return stats; + } + + void addSample(StatType sample) + { + lastSample = std::move(sample); + ++counter; + + const auto prevDelta = lastSample - mean; + mean += prevDelta / counter; + const auto currentDelta = lastSample - mean; + + m2 += prevDelta * currentDelta; + + if constexpr (dim >= 2) { + sumCov[0] = sumCov.x() + prevDelta.x() * currentDelta.y(); // covariance xy + } + + if constexpr (dim >= 3) { + sumCov[1] = sumCov.y() + prevDelta.y() * currentDelta.z(); // covariance yz + sumCov[2] = sumCov.z() + prevDelta.z() * currentDelta.x(); // covariance zx + } + } + + size_t getSamplesCount() const { return counter; } + + const StatType& getLastSample() const { return lastSample; } + + StatType getMean() const { return mean; } + + StatType getVariance() const { return counter < 2 ? StatType{0} : m2 / counter; } + + template= 2>> + T getCovarianceXY() const + { + return counter < 2 ? T{0} : sumCov.x() / counter; + } + + template= 3>> + T getCovarianceYZ() const + { + return counter < 2 ? T{0} : sumCov.y() / counter; + } + + template= 3>> + T getCovarianceZX() const + { + return counter < 2 ? 
T{0} : sumCov.z() / counter; + } + + StatType getStdDev() const + { + const auto variance = getVariance(); + + if constexpr (dim == 2) { + return {std::sqrt(variance.x()), std::sqrt(variance.y())}; + } + + if constexpr (dim == 3) { + return {std::sqrt(variance.x()), std::sqrt(variance.y()), std::sqrt(variance.z())}; + } + + if constexpr (dim == 4) { + return {std::sqrt(variance[0]), std::sqrt(variance[1]), std::sqrt(variance[2]), std::sqrt(variance[3])}; + } + } + + auto getMeanAndStdDev() const { return std::make_pair(getMean(), getStdDev()); } + +private: + StatType lastSample{0}; + size_t counter{0}; + + StatType mean{0}; + StatType m2{0}; + StatType sumCov{0}; //< Sum for calculating covariance. +}; diff --git a/src/math/Vector.hpp b/src/math/Vector.hpp index a36a085ae..95720da7f 100644 --- a/src/math/Vector.hpp +++ b/src/math/Vector.hpp @@ -44,7 +44,8 @@ struct Vector Vector(const std::array& values) { std::copy(values.begin(), values.end(), row); } // List constructor - template + template= 2>::type> HostDevFn Vector(Args... 
args) : row{static_cast(args)...} { static_assert(sizeof...(Args) == dim); diff --git a/src/tape/TapeCore.hpp b/src/tape/TapeCore.hpp index c8b816bca..adde660c4 100644 --- a/src/tape/TapeCore.hpp +++ b/src/tape/TapeCore.hpp @@ -53,6 +53,8 @@ class TapeCore static void tape_node_raytrace_configure_velocity(const YAML::Node& yamlNode, PlaybackState& state); static void tape_node_raytrace_configure_distortion(const YAML::Node& yamlNode, PlaybackState& state); static void tape_node_raytrace_configure_non_hits(const YAML::Node& yamlNode, PlaybackState& state); + static void tape_node_raytrace_configure_mask(const YAML::Node& yamlNode, PlaybackState& state); + static void tape_node_raytrace_configure_beam_divergence(const YAML::Node& yamlNode, PlaybackState& state); static void tape_node_points_format(const YAML::Node& yamlNode, PlaybackState& state); static void tape_node_points_yield(const YAML::Node& yamlNode, PlaybackState& state); static void tape_node_points_compact(const YAML::Node& yamlNode, PlaybackState& state); @@ -62,9 +64,12 @@ class TapeCore static void tape_node_points_from_array(const YAML::Node& yamlNode, PlaybackState& state); static void tape_node_points_filter_ground(const YAML::Node& yamlNode, PlaybackState& state); static void tape_node_points_radar_postprocess(const YAML::Node& yamlNode, PlaybackState& state); + static void tape_node_points_radar_track_objects(const YAML::Node& yamlNode, PlaybackState& state); + static void tape_node_points_radar_set_classes(const YAML::Node& yamlNode, PlaybackState& state); static void tape_node_gaussian_noise_angular_ray(const YAML::Node& yamlNode, PlaybackState& state); static void tape_node_gaussian_noise_angular_hitpoint(const YAML::Node& yamlNode, PlaybackState& state); static void tape_node_gaussian_noise_distance(const YAML::Node& yamlNode, PlaybackState& state); + static void tape_node_multi_return_switch(const YAML::Node& yamlNode, PlaybackState& state); // Called once in the translation unit static 
inline bool autoExtendTapeFunctions = std::invoke([]() { @@ -104,6 +109,9 @@ class TapeCore TAPE_CALL_MAPPING("rgl_node_raytrace_configure_velocity", TapeCore::tape_node_raytrace_configure_velocity), TAPE_CALL_MAPPING("rgl_node_raytrace_configure_distortion", TapeCore::tape_node_raytrace_configure_distortion), TAPE_CALL_MAPPING("rgl_node_raytrace_configure_non_hits", TapeCore::tape_node_raytrace_configure_non_hits), + TAPE_CALL_MAPPING("rgl_node_raytrace_configure_mask", TapeCore::tape_node_raytrace_configure_mask), + TAPE_CALL_MAPPING("rgl_node_raytrace_configure_beam_divergence", + TapeCore::tape_node_raytrace_configure_beam_divergence), TAPE_CALL_MAPPING("rgl_node_points_format", TapeCore::tape_node_points_format), TAPE_CALL_MAPPING("rgl_node_points_yield", TapeCore::tape_node_points_yield), TAPE_CALL_MAPPING("rgl_node_points_compact", TapeCore::tape_node_points_compact), @@ -113,9 +121,12 @@ class TapeCore TAPE_CALL_MAPPING("rgl_node_points_from_array", TapeCore::tape_node_points_from_array), TAPE_CALL_MAPPING("rgl_node_points_filter_ground", TapeCore::tape_node_points_filter_ground), TAPE_CALL_MAPPING("rgl_node_points_radar_postprocess", TapeCore::tape_node_points_radar_postprocess), + TAPE_CALL_MAPPING("rgl_node_points_radar_track_objects", TapeCore::tape_node_points_radar_track_objects), + TAPE_CALL_MAPPING("rgl_node_points_radar_set_classes", TapeCore::tape_node_points_radar_set_classes), TAPE_CALL_MAPPING("rgl_node_gaussian_noise_angular_ray", TapeCore::tape_node_gaussian_noise_angular_ray), TAPE_CALL_MAPPING("rgl_node_gaussian_noise_angular_hitpoint", TapeCore::tape_node_gaussian_noise_angular_hitpoint), TAPE_CALL_MAPPING("rgl_node_gaussian_noise_distance", TapeCore::tape_node_gaussian_noise_distance), + TAPE_CALL_MAPPING("rgl_node_multi_return_switch", TapeCore::tape_node_multi_return_switch), }; TapePlayer::extendTapeFunctions(tapeFunctions); return true; diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 7131fcee5..ebe361f6f 100644 --- 
a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -24,8 +24,10 @@ set(RGL_TEST_FILES src/graph/nodes/GaussianNoiseAngularHitpointNodeTest.cpp src/graph/nodes/GaussianNoiseAngularRayNodeTest.cpp src/graph/nodes/GaussianNoiseDistanceNodeTest.cpp + src/graph/MaskRaysTest.cpp src/graph/nodes/RaytraceNodeTest.cpp src/graph/nodes/RadarPostprocessPointsNodeTest.cpp + src/graph/nodes/RadarTrackObjectsNodeTest.cpp src/graph/nodes/SetRingIdsRaysNodeTest.cpp src/graph/nodes/SetTimeOffsetsRaysNodeTest.cpp src/graph/nodes/SpatialMergePointsNodeTest.cpp @@ -48,6 +50,7 @@ set(RGL_TEST_FILES src/synchronization/graphThreadSynchronization.cpp src/synchronization/testKernel.cu src/scene/incidentAngleTest.cpp + src/graph/multiReturnTest.cpp ) if (RGL_BUILD_ROS2_EXTENSION) @@ -121,4 +124,4 @@ endif() include(GoogleTest) -gtest_discover_tests(RobotecGPULidar_test) \ No newline at end of file +gtest_discover_tests(RobotecGPULidar_test) diff --git a/test/include/helpers/geometryData.hpp b/test/include/helpers/geometryData.hpp index a3c0d0b6c..59037617f 100644 --- a/test/include/helpers/geometryData.hpp +++ b/test/include/helpers/geometryData.hpp @@ -3,6 +3,7 @@ #include static constexpr float CUBE_HALF_EDGE = 1.0; +static constexpr float CUBE_EDGE = 2.0; static rgl_vec3f cubeVertices[] = { {-1, -1, -1}, // 0 diff --git a/test/include/helpers/lidarHelpers.hpp b/test/include/helpers/lidarHelpers.hpp index e5ca676f3..e6856761b 100644 --- a/test/include/helpers/lidarHelpers.hpp +++ b/test/include/helpers/lidarHelpers.hpp @@ -45,3 +45,33 @@ static std::vector makeGridOfParallelRays(Vec2f minCoord, Vec2f max } return rays; } + + +#if RGL_BUILD_ROS2_EXTENSION +#include + +#include + +/** +* @brief Auxiliary lidar graph for imaging entities on the scene +*/ +static rgl_node_t constructCameraGraph(const rgl_mat3x4f& cameraPose, const char* cameraName = "camera") +{ + const std::vector fields{XYZ_VEC3_F32}; + const std::vector cameraRayTf = makeLidar3dRays(360.0f, 180.0f, 0.5f, 0.5f); + 
rgl_node_t cameraRays = nullptr, cameraTransform = nullptr, cameraRaytrace = nullptr, cameraFormat = nullptr, + cameraPublish = nullptr; + + EXPECT_RGL_SUCCESS(rgl_node_rays_from_mat3x4f(&cameraRays, cameraRayTf.data(), cameraRayTf.size())); + EXPECT_RGL_SUCCESS(rgl_node_rays_transform(&cameraTransform, &cameraPose)); + EXPECT_RGL_SUCCESS(rgl_node_raytrace(&cameraRaytrace, nullptr)); + EXPECT_RGL_SUCCESS(rgl_node_points_format(&cameraFormat, fields.data(), fields.size())); + EXPECT_RGL_SUCCESS(rgl_node_points_ros2_publish(&cameraPublish, cameraName, "world")); + EXPECT_RGL_SUCCESS(rgl_graph_node_add_child(cameraRays, cameraTransform)); + EXPECT_RGL_SUCCESS(rgl_graph_node_add_child(cameraTransform, cameraRaytrace)); + EXPECT_RGL_SUCCESS(rgl_graph_node_add_child(cameraRaytrace, cameraFormat)); + EXPECT_RGL_SUCCESS(rgl_graph_node_add_child(cameraFormat, cameraPublish)); + + return cameraRays; +} +#endif \ No newline at end of file diff --git a/test/include/helpers/radarHelpers.hpp b/test/include/helpers/radarHelpers.hpp new file mode 100644 index 000000000..25389e317 --- /dev/null +++ b/test/include/helpers/radarHelpers.hpp @@ -0,0 +1,99 @@ +#pragma once + +#include +#include + +struct PostProcessNodeParams +{ + rgl_radar_scope_t scope; + int32_t radarScopesCount; + float azimuthStepRad; + float elevationStepRad; + float frequencyHz; + float powerTransmittedDdm; + float antennaGainDbi; + float noiseMean; + float noiseStdDev; +}; + +struct TrackObjectNodeParams +{ + float distanceThreshold; + float azimuthThreshold; + float elevationThreshold; + float radialSpeedThreshold; + float maxMatchingDistance; + float maxPredictionTimeFrame; + float movementSensitivity; +}; + +#if RGL_BUILD_ROS2_EXTENSION +#include + +static void constructRadarPostProcessObjectTrackingGraph( + const std::vector& radarRays, const rgl_mat3x4f& radarRayTf, rgl_node_t& postProcessNode, + const PostProcessNodeParams& postProcessNodeParams, rgl_node_t& trackObjectNode, + const TrackObjectNodeParams& 
trackObjectNodeParams, const std::vector::type>& entityIds, + const std::vector& objectClasses, bool withPublish = false) +{ + rgl_node_t radarRaysNode = nullptr, radarRaysTfNode = nullptr, raytraceNode = nullptr, noiseNode = nullptr, + compactNode = nullptr, formatPostProcessNode = nullptr, formatTrackObjectNode = nullptr, + ros2PublishPostProcessNode = nullptr, ros2PublishTrackObjectNode = nullptr; + + ASSERT_RGL_SUCCESS(rgl_node_rays_from_mat3x4f(&radarRaysNode, radarRays.data(), radarRays.size())); + ASSERT_RGL_SUCCESS(rgl_node_rays_transform(&radarRaysTfNode, &radarRayTf)); + ASSERT_RGL_SUCCESS(rgl_node_raytrace(&raytraceNode, nullptr)); + ASSERT_RGL_SUCCESS( + rgl_node_points_compact_by_field(&compactNode, IS_HIT_I32)); // RadarComputeEnergyPointsNode requires dense input + ASSERT_RGL_SUCCESS(rgl_node_points_radar_postprocess( + &postProcessNode, &postProcessNodeParams.scope, postProcessNodeParams.radarScopesCount, + postProcessNodeParams.azimuthStepRad, postProcessNodeParams.elevationStepRad, postProcessNodeParams.frequencyHz, + postProcessNodeParams.powerTransmittedDdm, postProcessNodeParams.antennaGainDbi, postProcessNodeParams.noiseMean, + postProcessNodeParams.noiseStdDev)); + ASSERT_RGL_SUCCESS(rgl_node_points_radar_track_objects( + &trackObjectNode, trackObjectNodeParams.distanceThreshold, trackObjectNodeParams.azimuthThreshold, + trackObjectNodeParams.elevationThreshold, trackObjectNodeParams.radialSpeedThreshold, + trackObjectNodeParams.maxMatchingDistance, trackObjectNodeParams.maxPredictionTimeFrame, + trackObjectNodeParams.movementSensitivity)); + ASSERT_RGL_SUCCESS( + rgl_node_points_radar_set_classes(trackObjectNode, entityIds.data(), objectClasses.data(), entityIds.size())); + + ASSERT_RGL_SUCCESS(rgl_graph_node_add_child(radarRaysNode, radarRaysTfNode)); + ASSERT_RGL_SUCCESS(rgl_graph_node_add_child(radarRaysTfNode, raytraceNode)); + ASSERT_RGL_SUCCESS(rgl_graph_node_add_child(raytraceNode, compactNode)); + 
ASSERT_RGL_SUCCESS(rgl_graph_node_add_child(compactNode, postProcessNode)); + ASSERT_RGL_SUCCESS(rgl_graph_node_add_child(postProcessNode, trackObjectNode)); + + if (withPublish) { + const std::vector formatFields = {XYZ_VEC3_F32, ENTITY_ID_I32}; + ASSERT_RGL_SUCCESS(rgl_node_points_format(&formatPostProcessNode, formatFields.data(), formatFields.size())); + ASSERT_RGL_SUCCESS(rgl_node_points_format(&formatTrackObjectNode, formatFields.data(), formatFields.size())); + ASSERT_RGL_SUCCESS(rgl_node_points_ros2_publish(&ros2PublishPostProcessNode, "radar_detection", "world")); + ASSERT_RGL_SUCCESS(rgl_node_points_ros2_publish(&ros2PublishTrackObjectNode, "radar_track_object", "world")); + ASSERT_RGL_SUCCESS(rgl_graph_node_add_child(trackObjectNode, formatTrackObjectNode)); + ASSERT_RGL_SUCCESS(rgl_graph_node_add_child(postProcessNode, formatPostProcessNode)); + ASSERT_RGL_SUCCESS(rgl_graph_node_add_child(formatPostProcessNode, ros2PublishPostProcessNode)); + ASSERT_RGL_SUCCESS(rgl_graph_node_add_child(formatTrackObjectNode, ros2PublishTrackObjectNode)); + } +} +#endif + +static std::vector genRadarRays(const float minAzimuth, const float maxAzimuth, const float minElevation, + const float maxElevation, const float azimuthStep, const float elevationStep) +{ + std::vector rays; + for (auto a = minAzimuth; a <= maxAzimuth; a += azimuthStep) { + for (auto e = minElevation; e <= maxElevation; e += elevationStep) { + // By default, the rays are directed along the Z-axis + // So first, we rotate them around the Y-axis to point towards the X-axis (to be RVIZ2 compatible) + // Then, rotation around Z is azimuth, around Y is elevation + const auto ray = Mat3x4f::rotationDeg(0, e, a) * Mat3x4f::rotationDeg(0, 90, 0); + rays.emplace_back(ray.toRGL()); + + const auto rayDir = ray * Vec3f{0, 0, 1}; + //printf("rayDir: %.2f %.2f %.2f\n", rayDir.x(), rayDir.y(), rayDir.z()); + } + } + + return rays; +} \ No newline at end of file diff --git a/test/include/helpers/testPointCloud.hpp 
b/test/include/helpers/testPointCloud.hpp index e2e3e5b97..397541f65 100644 --- a/test/include/helpers/testPointCloud.hpp +++ b/test/include/helpers/testPointCloud.hpp @@ -241,7 +241,7 @@ class TestPointCloud } template - std::vector::type> getFieldValues() + std::vector::type> getFieldValues() const { int fieldIndex = std::find(fields.begin(), fields.end(), T) - fields.begin(); @@ -252,7 +252,7 @@ class TestPointCloud for (int i = 0; i < getPointCount(); i++) { fieldValues.emplace_back( - *reinterpret_cast::type*>(data.data() + i * getPointByteSize() + offset)); + *reinterpret_cast::type*>(data.data() + i * getPointByteSize() + offset)); } return fieldValues; diff --git a/test/src/TapeTest.cpp b/test/src/TapeTest.cpp index 81b0abc35..565349b35 100644 --- a/test/src/TapeTest.cpp +++ b/test/src/TapeTest.cpp @@ -233,6 +233,10 @@ TEST_F(TapeTest, RecordPlayAllCalls) float farNonHitDistance = 2.0f; EXPECT_RGL_SUCCESS(rgl_node_raytrace_configure_non_hits(raytrace, nearNonHitDistance, farNonHitDistance)); + float hBeamDivergence = 0.1f; + float vBeamDivergence = 0.1f; + EXPECT_RGL_SUCCESS(rgl_node_raytrace_configure_beam_divergence(raytrace, hBeamDivergence, vBeamDivergence)); + rgl_node_t format = nullptr; std::vector fields = {RGL_FIELD_XYZ_VEC3_F32, RGL_FIELD_DISTANCE_F32}; EXPECT_RGL_SUCCESS(rgl_node_points_format(&format, fields.data(), fields.size())); @@ -268,6 +272,15 @@ TEST_F(TapeTest, RecordPlayAllCalls) EXPECT_RGL_SUCCESS( rgl_node_points_radar_postprocess(&radarPostprocess, &radarScope, 1, 1.0f, 1.0f, 79E9f, 31.0f, 27.0f, 60.0f, 1.0f)); + rgl_node_t radarTrackObjects = nullptr; + EXPECT_RGL_SUCCESS(rgl_node_points_radar_track_objects(&radarTrackObjects, 2.0f, 0.1f, 0.1f, 0.5f, 1.0f, 500.0f, 0.01f)); + + std::vector::type> entityIds = {1, 2, 3}; + std::vector objectClasses = {RGL_RADAR_CLASS_CAR, RGL_RADAR_CLASS_TRUCK, + RGL_RADAR_CLASS_MOTORCYCLE}; + EXPECT_RGL_SUCCESS( + rgl_node_points_radar_set_classes(radarTrackObjects, entityIds.data(), 
objectClasses.data(), entityIds.size())); + rgl_node_t filterGround = nullptr; rgl_vec3f sensorUpVector = {0.0f, 1.0f, 0.0f}; EXPECT_RGL_SUCCESS(rgl_node_points_filter_ground(&filterGround, &sensorUpVector, 0.1f)); @@ -405,4 +418,4 @@ TEST_F(TapeTest, SceneReconstruction) rgl_tape_play(cubeSceneRecordPath.c_str()); testCubeSceneOnGraph(); -} \ No newline at end of file +} diff --git a/test/src/graph/MaskRaysTest.cpp b/test/src/graph/MaskRaysTest.cpp new file mode 100644 index 000000000..f28ff5676 --- /dev/null +++ b/test/src/graph/MaskRaysTest.cpp @@ -0,0 +1,98 @@ +#include "helpers/testPointCloud.hpp" +#include "helpers/commonHelpers.hpp" +#include "RGLFields.hpp" +#include "helpers/sceneHelpers.hpp" +#include "helpers/lidarHelpers.hpp" + +class MaskRaysTest : public RGLTest +{ + +protected: + const int maskCount = 100; + std::vector points_mask; + std::vector fields = {XYZ_VEC3_F32, IS_HIT_I32, INTENSITY_F32, IS_GROUND_I32}; + + void initializeMask(int raysCount) + { + points_mask.resize(raysCount); + std::fill(points_mask.begin(), points_mask.end(), 1); + for (int i = 0; i < maskCount; i++) { + points_mask[i] = 0; + } + } +}; + +TEST_F(MaskRaysTest, invalid_argument_node) { EXPECT_RGL_INVALID_ARGUMENT(rgl_node_raytrace_configure_mask(nullptr, nullptr, 0)); } + +TEST_F(MaskRaysTest, invalid_argument_mask) +{ + rgl_node_t raytraceNode = nullptr; + EXPECT_RGL_SUCCESS(rgl_node_raytrace(&raytraceNode, nullptr)); + + EXPECT_RGL_INVALID_ARGUMENT(rgl_node_raytrace_configure_mask(raytraceNode, nullptr, 0)); +} + +TEST_F(MaskRaysTest, invalid_argument_count) +{ + rgl_node_t raytraceNode = nullptr; + EXPECT_RGL_SUCCESS(rgl_node_raytrace(&raytraceNode, nullptr)); + + EXPECT_RGL_INVALID_ARGUMENT(rgl_node_raytrace_configure_mask(raytraceNode, points_mask.data(), 0)); +} + +TEST_F(MaskRaysTest, valid_arguments) +{ + const int raysNumber = 100; + rgl_node_t raytraceNode = nullptr; + EXPECT_RGL_SUCCESS(rgl_node_raytrace(&raytraceNode, nullptr)); + + // Mask + 
initializeMask(raysNumber); + + EXPECT_RGL_SUCCESS(rgl_node_raytrace_configure_mask(raytraceNode, points_mask.data(), raysNumber)); +} + +TEST_F(MaskRaysTest, use_case) +{ + // Scene + rgl_entity_t wall = makeEntity(makeCubeMesh()); + rgl_mat3x4f wallPose = Mat3x4f::TRS(Vec3f(0,0,0), Vec3f(0, 0, 0), Vec3f(10,10,10)).toRGL(); + EXPECT_RGL_SUCCESS(rgl_entity_set_pose(wall, &wallPose)); + + // Rays + std::vector rays = makeLidar3dRays(360, 180, 0.36, 0.18); + + const int raysNumber = rays.size(); + // Mask + initializeMask(raysNumber); + + // Graph + rgl_node_t useRaysNode = nullptr; + rgl_node_t raytraceNode = nullptr; + rgl_node_t compactNode = nullptr; + rgl_node_t yieldNode = nullptr; + + // Prepare graph without filtering + EXPECT_RGL_SUCCESS(rgl_node_rays_from_mat3x4f(&useRaysNode, rays.data(), rays.size())); + EXPECT_RGL_SUCCESS(rgl_node_raytrace(&raytraceNode, nullptr)); + EXPECT_RGL_SUCCESS(rgl_node_points_compact_by_field(&compactNode, IS_HIT_I32)); + EXPECT_RGL_SUCCESS(rgl_node_points_yield(&yieldNode, fields.data(), fields.size())); + + EXPECT_RGL_SUCCESS(rgl_graph_node_add_child(useRaysNode, raytraceNode)); + EXPECT_RGL_SUCCESS(rgl_graph_node_add_child(raytraceNode, compactNode)); + EXPECT_RGL_SUCCESS(rgl_graph_node_add_child(compactNode, yieldNode)); + + EXPECT_RGL_SUCCESS(rgl_graph_run(raytraceNode)); + + TestPointCloud outputPointCloud = TestPointCloud::createFromNode(yieldNode, fields); + auto fullCloudSize = outputPointCloud.getPointCount(); + + EXPECT_RGL_SUCCESS(rgl_node_raytrace_configure_mask(raytraceNode, points_mask.data(), raysNumber)); + + EXPECT_RGL_SUCCESS(rgl_graph_run(raytraceNode)); + TestPointCloud outputPointCloudMasked = TestPointCloud::createFromNode(yieldNode, fields); + auto maskedCloudSize = outputPointCloudMasked.getPointCount(); + + EXPECT_EQ(fullCloudSize - maskCount, maskedCloudSize); + +} \ No newline at end of file diff --git a/test/src/graph/multiReturnTest.cpp b/test/src/graph/multiReturnTest.cpp new file mode 100644 index 
000000000..311a83e88 --- /dev/null +++ b/test/src/graph/multiReturnTest.cpp @@ -0,0 +1,394 @@ +#include +#include "helpers/lidarHelpers.hpp" +#include "helpers/testPointCloud.hpp" +#include "helpers/sceneHelpers.hpp" + +#include "RGLFields.hpp" +#include "math/Mat3x4f.hpp" +#include "gpu/MultiReturn.hpp" + +#include + +using namespace std::chrono_literals; + +#if RGL_BUILD_ROS2_EXTENSION +#include +#endif + +class GraphMultiReturn : public RGLTest +{ +protected: + // VLP16 data + const float vlp16LidarObjectDistance = 100.0f; + const float vlp16LidarHBeamDivergence = 0.003f; // Velodyne VLP16 horizontal beam divergence in rads + const float vlp16LidarVBeamDivergence = 0.0015f; // Velodyne VLP16 vertical beam divergence in rads + const float vlp16LidarHBeamDiameter = 0.2868f; // Velodyne VLP16 beam horizontal diameter at 100m + const float vlp16LidarVBeamDiameter = 0.1596f; // Velodyne VLP16 beam vertical diameter at 100m + + std::vector vlp16Channels{Mat3x4f::TRS({0.0f, 0.0f, mmToMeters(11.2f)}, {0.0f, -15.0f, 0.0f}), + Mat3x4f::TRS({0.0f, 0.0f, mmToMeters(0.7f)}, {0.0f, +1.0f, 0.0f}), + Mat3x4f::TRS({0.0f, 0.0f, mmToMeters(9.7f)}, {0.0f, -13.0f, 0.0f}), + Mat3x4f::TRS({0.0f, 0.0f, mmToMeters(-2.2f)}, {0.0f, +3.0f, 0.0f}), + Mat3x4f::TRS({0.0f, 0.0f, mmToMeters(8.1f)}, {0.0f, -11.0f, 0.0f}), + Mat3x4f::TRS({0.0f, 0.0f, mmToMeters(-3.7f)}, {0.0f, +5.0f, 0.0f}), + Mat3x4f::TRS({0.0f, 0.0f, mmToMeters(6.6f)}, {0.0f, -9.0f, 0.0f}), + Mat3x4f::TRS({0.0f, 0.0f, mmToMeters(-5.1f)}, {0.0f, +7.0f, 0.0f}), + Mat3x4f::TRS({0.0f, 0.0f, mmToMeters(5.1f)}, {0.0f, -7.0f, 0.0f}), + Mat3x4f::TRS({0.0f, 0.0f, mmToMeters(-6.6f)}, {0.0f, +9.0f, 0.0f}), + Mat3x4f::TRS({0.0f, 0.0f, mmToMeters(3.7f)}, {0.0f, -5.0f, 0.0f}), + Mat3x4f::TRS({0.0f, 0.0f, mmToMeters(-8.1f)}, {0.0f, +11.0f, 0.0f}), + Mat3x4f::TRS({0.0f, 0.0f, mmToMeters(2.2f)}, {0.0f, -3.0f, 0.0f}), + Mat3x4f::TRS({0.0f, 0.0f, mmToMeters(-9.7f)}, {0.0f, +13.0f, 0.0f}), + Mat3x4f::TRS({0.0f, 0.0f, mmToMeters(0.7f)}, {0.0f, 
-1.0f, 0.0f}), + Mat3x4f::TRS({0.0f, 0.0f, mmToMeters(-11.2f)}, {0.0f, +15.0f, 0.0f})}; + + const std::vector fields = {XYZ_VEC3_F32, IS_HIT_I32, DISTANCE_F32 /*, INTENSITY_F32, ENTITY_ID_I32*/}; + + rgl_node_t rays = nullptr, mrRays = nullptr, cameraRays = nullptr, transform = nullptr, mrTransform = nullptr, + cameraTransform = nullptr, raytrace = nullptr, mrRaytrace = nullptr, cameraRaytrace = nullptr, mrFirst = nullptr, + mrLast = nullptr, format = nullptr, mrFormatFirst = nullptr, mrFormatLast = nullptr, cameraFormat = nullptr, + publish = nullptr, cameraPublish = nullptr, mrPublishFirst = nullptr, mrPublishLast = nullptr, + compactFirst = nullptr, compactLast = nullptr; + + void constructMRGraph(const std::vector& raysTf, const rgl_mat3x4f& lidarPose, const float hBeamDivAngle, + const float vBeamDivAngle, bool withPublish = false) + { + EXPECT_RGL_SUCCESS(rgl_node_rays_from_mat3x4f(&mrRays, raysTf.data(), raysTf.size())); + EXPECT_RGL_SUCCESS(rgl_node_rays_transform(&mrTransform, &lidarPose)); + EXPECT_RGL_SUCCESS(rgl_node_raytrace(&mrRaytrace, nullptr)); + EXPECT_RGL_SUCCESS(rgl_node_raytrace_configure_beam_divergence(mrRaytrace, hBeamDivAngle, vBeamDivAngle)); + EXPECT_RGL_SUCCESS(rgl_node_multi_return_switch(&mrFirst, RGL_RETURN_TYPE_FIRST)); + EXPECT_RGL_SUCCESS(rgl_node_multi_return_switch(&mrLast, RGL_RETURN_TYPE_LAST)); + EXPECT_RGL_SUCCESS(rgl_node_points_compact_by_field(&compactFirst, RGL_FIELD_IS_HIT_I32)); + EXPECT_RGL_SUCCESS(rgl_node_points_compact_by_field(&compactLast, RGL_FIELD_IS_HIT_I32)); + EXPECT_RGL_SUCCESS(rgl_node_points_format(&mrFormatFirst, fields.data(), fields.size())); + EXPECT_RGL_SUCCESS(rgl_node_points_format(&mrFormatLast, fields.data(), fields.size())); +#if RGL_BUILD_ROS2_EXTENSION + if (withPublish) { + EXPECT_RGL_SUCCESS(rgl_node_points_ros2_publish(&mrPublishFirst, "MRTest_First", "world")); + EXPECT_RGL_SUCCESS(rgl_node_points_ros2_publish(&mrPublishLast, "MRTest_Last", "world")); + } +#else + if (withPublish) { + 
GTEST_SKIP() << "Publishing is not supported without ROS2 extension. Skippping the test."; + } +#endif + EXPECT_RGL_SUCCESS(rgl_graph_node_add_child(mrRays, mrTransform)); + EXPECT_RGL_SUCCESS(rgl_graph_node_add_child(mrTransform, mrRaytrace)); + EXPECT_RGL_SUCCESS(rgl_graph_node_add_child(mrRaytrace, mrFirst)); + EXPECT_RGL_SUCCESS(rgl_graph_node_add_child(mrRaytrace, mrLast)); + EXPECT_RGL_SUCCESS(rgl_graph_node_add_child(mrFirst, compactFirst)); + EXPECT_RGL_SUCCESS(rgl_graph_node_add_child(mrLast, compactLast)); + EXPECT_RGL_SUCCESS(rgl_graph_node_add_child(compactFirst, mrFormatFirst)); + EXPECT_RGL_SUCCESS(rgl_graph_node_add_child(compactLast, mrFormatLast)); + if (withPublish) { + EXPECT_RGL_SUCCESS(rgl_graph_node_add_child(mrFormatFirst, mrPublishFirst)); + EXPECT_RGL_SUCCESS(rgl_graph_node_add_child(mrFormatLast, mrPublishLast)); + } + } + + void constructCameraGraph(const rgl_mat3x4f& cameraPose) + { +#if RGL_BUILD_ROS2_EXTENSION + const std::vector cameraRayTf = makeLidar3dRays(360.0f, 180.0f, 1.0f, 1.0f); + EXPECT_RGL_SUCCESS(rgl_node_rays_from_mat3x4f(&cameraRays, cameraRayTf.data(), cameraRayTf.size())); + EXPECT_RGL_SUCCESS(rgl_node_rays_transform(&cameraTransform, &cameraPose)); + EXPECT_RGL_SUCCESS(rgl_node_raytrace(&cameraRaytrace, nullptr)); + EXPECT_RGL_SUCCESS(rgl_node_points_format(&cameraFormat, fields.data(), fields.size())); + EXPECT_RGL_SUCCESS(rgl_node_points_ros2_publish(&cameraPublish, "MRTest_Camera", "world")); + EXPECT_RGL_SUCCESS(rgl_graph_node_add_child(cameraRays, cameraTransform)); + EXPECT_RGL_SUCCESS(rgl_graph_node_add_child(cameraTransform, cameraRaytrace)); + EXPECT_RGL_SUCCESS(rgl_graph_node_add_child(cameraRaytrace, cameraFormat)); + EXPECT_RGL_SUCCESS(rgl_graph_node_add_child(cameraFormat, cameraPublish)); +#else + GTEST_SKIP() << "Publishing is not supported without ROS2 extension. 
Skippping the test with camera."; +#endif + } + + void spawnStairsOnScene(const float stepWidth, const float stepHeight, const float stepDepth, const float stairsBaseHeight, + const Vec3f& stairsTranslation) const + { + const Vec3f cubeHalfEdgeScaled{CUBE_HALF_EDGE * stepDepth / CUBE_EDGE, CUBE_HALF_EDGE * stepWidth / CUBE_EDGE, + CUBE_HALF_EDGE * stepHeight / CUBE_EDGE}; + + const auto firstCubeTf = + Mat3x4f::translation(stairsTranslation) * + Mat3x4f::TRS({cubeHalfEdgeScaled.x(), 0.0f, -cubeHalfEdgeScaled.z() + stairsBaseHeight + stepHeight}, + {0.0f, 0.0f, 0.0f}, {stepDepth / CUBE_EDGE, stepWidth / CUBE_EDGE, stepHeight / CUBE_EDGE}); + spawnCubeOnScene(firstCubeTf); + spawnCubeOnScene(Mat3x4f::translation(stepDepth, 0.0f, stepHeight) * firstCubeTf); + spawnCubeOnScene(Mat3x4f::translation(2 * stepDepth, 0.0f, 2 * stepHeight) * firstCubeTf); + } + + constexpr float mmToMeters(float mm) const { return mm * 0.001f; } +}; + +/** + * This test verifies the accuracy of multiple return handling for the data specified for LiDAR VLP16 + * by firing a single beam into a cube and making sure the first and last hits are correctly calculated. 
+ */ +TEST_F(GraphMultiReturn, vlp16_data_compare) +{ + // Lidar + const std::vector raysTf{Mat3x4f::TRS({0.0f, 0.0f, 0.0f}, {90.0f, 0.0f, -90.0f}).toRGL()}; + const float lidarCubeFaceDist = vlp16LidarObjectDistance; + const float lidarCubeCenterDist = lidarCubeFaceDist + CUBE_HALF_EDGE; + const auto lidarTransl = Vec3f{lidarCubeCenterDist, 0.0f, 0.0f}; + const rgl_mat3x4f lidarPose = Mat3x4f::TRS(lidarTransl).toRGL(); + + // Scene + spawnCubeOnScene(Mat3x4f::identity()); + + constructMRGraph(raysTf, lidarPose, vlp16LidarHBeamDivergence, vlp16LidarVBeamDivergence); + + EXPECT_RGL_SUCCESS(rgl_graph_run(mrRays)); + + // Verify the output + const float epsilon = 1e-4f; + + const auto mrFirstOutPointcloud = TestPointCloud::createFromNode(mrFormatFirst, fields); + const auto mrFirstIsHits = mrFirstOutPointcloud.getFieldValues(); + const auto mrFirstPoints = mrFirstOutPointcloud.getFieldValues(); + const auto mrFirstDistances = mrFirstOutPointcloud.getFieldValues(); + const auto expectedFirstPoint = Vec3f{CUBE_HALF_EDGE, 0.0f, 0.0f}; + EXPECT_EQ(mrFirstOutPointcloud.getPointCount(), raysTf.size()); + EXPECT_TRUE(mrFirstIsHits.at(0)); + EXPECT_NEAR(mrFirstDistances.at(0), lidarCubeFaceDist, epsilon); + EXPECT_NEAR(mrFirstPoints.at(0).x(), expectedFirstPoint.x(), epsilon); + EXPECT_NEAR(mrFirstPoints.at(0).y(), expectedFirstPoint.y(), epsilon); + EXPECT_NEAR(mrFirstPoints.at(0).z(), expectedFirstPoint.z(), epsilon); + + const auto mrLastOutPointcloud = TestPointCloud::createFromNode(mrFormatLast, fields); + const auto mrLastIsHits = mrLastOutPointcloud.getFieldValues(); + const auto mrLastPoints = mrLastOutPointcloud.getFieldValues(); + const auto mrLastDistances = mrLastOutPointcloud.getFieldValues(); + const float expectedDiameter = std::max(vlp16LidarHBeamDiameter, vlp16LidarVBeamDiameter); + const auto expectedLastDistance = static_cast(sqrt(pow(lidarCubeFaceDist, 2) + pow(expectedDiameter / 2, 2))); + // Subtract because the ray is pointing along the negative X axis 
+ const auto expectedLastPoint = lidarTransl - Vec3f{expectedLastDistance, 0.0f, 0.0f}; + EXPECT_EQ(mrLastOutPointcloud.getPointCount(), raysTf.size()); + EXPECT_TRUE(mrLastIsHits.at(0)); + EXPECT_NEAR(mrLastDistances.at(0), expectedLastDistance, epsilon); + EXPECT_NEAR(mrLastPoints.at(0).x(), expectedLastPoint.x(), epsilon); + EXPECT_NEAR(mrLastPoints.at(0).y(), expectedLastPoint.y(), epsilon); + EXPECT_NEAR(mrLastPoints.at(0).z(), expectedLastPoint.z(), epsilon); +} + +#if RGL_BUILD_ROS2_EXTENSION +/** + * This test verifies the performance of the multiple return feature in a dynamic scene + * with two cubes placed one behind the other, one cube cyclically moving sideways. + * LiDAR fires the beam in such a way that in some frames the beam partially overlaps the edge of the moving cube. + */ +TEST_F(GraphMultiReturn, cube_in_motion) +{ + /* + * gap + * ________ <----> + * | | | + * | | | + * |________| | + * | + * X + * LIDAR + */ + + GTEST_SKIP(); // Comment to run the test + + // Lidar + const std::vector raysTf{Mat3x4f::TRS({0.0f, 0.0f, 0.0f}, {90.0f, 0.0f, 90.0f}).toRGL()}; + const float lidarCubeFaceDist = 100.0f; + const float lidarCubeCenterDist = lidarCubeFaceDist + CUBE_HALF_EDGE * 2; + const Vec3f lidarTransl = {-lidarCubeCenterDist, 3.0f, 3.0f}; + const rgl_mat3x4f lidarPose = Mat3x4f::translation(lidarTransl).toRGL(); + + // Scene + const Vec2f gapRange = {0.001f, 0.5f}; + const std::vector entitiesTransforms = { + Mat3x4f::TRS(Vec3f{-5.0f, lidarTransl.y() + gapRange.x() + CUBE_HALF_EDGE, lidarTransl.z()}), + Mat3x4f::TRS(Vec3f{0.0f, lidarTransl.y(), lidarTransl.z()}, {0, 0, 0}, {2, 2, 2})}; + std::vector entities = {spawnCubeOnScene(entitiesTransforms.at(0)), + spawnCubeOnScene(entitiesTransforms.at(1))}; + + // Lidar with MR + constructMRGraph(raysTf, lidarPose, vlp16LidarHBeamDivergence, vlp16LidarVBeamDivergence, true); + + // Lidar without MR + EXPECT_RGL_SUCCESS(rgl_node_rays_from_mat3x4f(&rays, raysTf.data(), raysTf.size())); + 
EXPECT_RGL_SUCCESS(rgl_node_rays_transform(&transform, &lidarPose)); + EXPECT_RGL_SUCCESS(rgl_node_raytrace(&raytrace, nullptr)); + EXPECT_RGL_SUCCESS(rgl_node_points_format(&format, fields.data(), fields.size())); + EXPECT_RGL_SUCCESS(rgl_node_points_ros2_publish(&publish, "MRTest_Lidar_Without_MR", "world")); + EXPECT_RGL_SUCCESS(rgl_graph_node_add_child(rays, transform)); + EXPECT_RGL_SUCCESS(rgl_graph_node_add_child(transform, raytrace)); + EXPECT_RGL_SUCCESS(rgl_graph_node_add_child(raytrace, format)); + EXPECT_RGL_SUCCESS(rgl_graph_node_add_child(format, publish)); + + // Camera + const rgl_mat3x4f cameraPose = Mat3x4f::TRS(Vec3f{-8.0f, 1.0f, 5.0f}, {90.0f, 30.0f, -70.0f}).toRGL(); + constructCameraGraph(cameraPose); + + int frameId = 0; + while (true) { + + const auto newPose = (entitiesTransforms.at(0) * + Mat3x4f::translation(0.0f, std::abs(std::sin(frameId * 0.05f)) * gapRange.y(), 0.0f)) + .toRGL(); + EXPECT_RGL_SUCCESS(rgl_entity_set_pose(entities.at(0), &newPose)); + + ASSERT_RGL_SUCCESS(rgl_graph_run(cameraRays)); + ASSERT_RGL_SUCCESS(rgl_graph_run(rays)); + ASSERT_RGL_SUCCESS(rgl_graph_run(mrRays)); + + std::this_thread::sleep_for(50ms); + frameId += 1; + } +} +#endif + +#if RGL_BUILD_ROS2_EXTENSION +/** + * This test verifies how changing the beam divergence affects the results obtained with the multi return feature enabled. + * Three cubes arranged in the shape of a stairs are placed on the scene. LiDAR aims the only ray at the center of the middle “stair", + * during the test the beam divergence angle is increased/decreased periodically. 
+ */ +TEST_F(GraphMultiReturn, stairs) +{ + /* + * ____ ^ + * ____| | Z+ + * ____| middle step | + * | ----> X+ + */ + + GTEST_SKIP(); // Comment to run the test + + // Scene + const float stairsBaseHeight = 0.0f; + const float stepWidth = 1.0f; + const float stepHeight = vlp16LidarVBeamDiameter + 0.1f; + const float stepDepth = 0.8f; + const Vec3f stairsTranslation{2.0f, 0.0f, 0.0f}; + + spawnStairsOnScene(stepWidth, stepHeight, stepDepth, stairsBaseHeight, stairsTranslation); + + // Lidar + const std::vector raysTf{Mat3x4f::TRS({0.0f, 0.0f, 0.0f}, {90.0f, 0.0f, 90.0f}).toRGL()}; + + const float lidarMiddleStepDist = 1.5f * vlp16LidarObjectDistance; + const Vec3f lidarTransl{-lidarMiddleStepDist + stepDepth, 0.0f, stepHeight * 1.5f}; + + const rgl_mat3x4f lidarPose{(Mat3x4f::translation(lidarTransl + stairsTranslation)).toRGL()}; + + // Lidar with MR + constructMRGraph(raysTf, lidarPose, vlp16LidarHBeamDivergence, vlp16LidarVBeamDivergence, true); + + // Camera + rgl_mat3x4f cameraPose = Mat3x4f::translation({0.0f, -1.5f, stepHeight * 3 + 1.0f}).toRGL(); + constructCameraGraph(cameraPose); + ASSERT_RGL_SUCCESS(rgl_graph_run(cameraRays)); + + int frameId = 0; + while (true) { + const float newVBeamDivAngle = vlp16LidarVBeamDivergence + std::sin(frameId * 0.1f) * vlp16LidarVBeamDivergence; + ASSERT_RGL_SUCCESS( + rgl_node_raytrace_configure_beam_divergence(mrRaytrace, vlp16LidarHBeamDivergence, newVBeamDivAngle)); + + ASSERT_RGL_SUCCESS(rgl_graph_run(mrRaytrace)); + + std::this_thread::sleep_for(100ms); + frameId += 1; + } +} + +/** + * This test verifies the performance of the multi return feature when releasing multiple ray beams at once. 
+ */ +TEST_F(GraphMultiReturn, multiple_ray_beams) +{ + GTEST_SKIP(); // Comment to run the test + + // Scene + spawnCubeOnScene(Mat3x4f::TRS({0.0f, 0.0f, 0.0f}, {0.0f, 0.0f, 0.0f}, {2.0f, 2.0f, 2.0f})); + + // Camera + constructCameraGraph(Mat3x4f::identity().toRGL()); + + // Lidar with MR + const int horizontalSteps = 10; + const auto resolution = 360.0f / horizontalSteps; + std::vector vlp16RaysTf; + vlp16RaysTf.reserve(horizontalSteps * vlp16Channels.size()); + + for (int i = 0; i < horizontalSteps; ++i) { + for (const auto& velodyneVLP16Channel : vlp16Channels) { + vlp16RaysTf.emplace_back((Mat3x4f::rotationDeg(0.0f, 90.0f, resolution * i) * velodyneVLP16Channel).toRGL()); + } + } + const rgl_mat3x4f lidarPose = Mat3x4f::identity().toRGL(); + const float beamDivAngle = vlp16LidarHBeamDivergence; + constructMRGraph(vlp16RaysTf, lidarPose, beamDivAngle, true); + + ASSERT_RGL_SUCCESS(rgl_graph_run(cameraRays)); + ASSERT_RGL_SUCCESS(rgl_graph_run(mrRays)); +} + +/** + * In the MR implementation, the beam is modeled using beam samples, from which the first and last hits are calculated. + * This test uses the code that generates these beam samples and fires them inside the cube. + * It verifies the accuracy of beam sample generation with different horizontal and vertical divergence angles. 
+ */ +TEST_F(GraphMultiReturn, horizontal_vertical_beam_divergence) +{ + GTEST_SKIP(); // Comment to run the test + + const float hHalfDivergenceAngleRad = M_PI / 4; + const float vHalfDivergenceAngleRad = M_PI / 8; + + // Lidar + const auto lidarTransl = Vec3f{0.0f, 0.0f, 0.0f}; + const rgl_mat3x4f lidarPose = Mat3x4f::TRS(lidarTransl).toRGL(); + std::vector raysTf; + raysTf.reserve(MULTI_RETURN_BEAM_SAMPLES); + + raysTf.emplace_back(Mat3x4f::identity().toRGL()); + + // Code that generates beam samples in the makeBeamSampleRayTransform function (gpu/optixPrograms.cu) + for (int layerIdx = 0; layerIdx < MULTI_RETURN_BEAM_LAYERS; ++layerIdx) { + for (int vertexIdx = 0; vertexIdx < MULTI_RETURN_BEAM_VERTICES; ++vertexIdx) { + const float hCurrentDivergence = hHalfDivergenceAngleRad * + (1.0f - static_cast(layerIdx) / MULTI_RETURN_BEAM_LAYERS); + const float vCurrentDivergence = vHalfDivergenceAngleRad * + (1.0f - static_cast(layerIdx) / MULTI_RETURN_BEAM_LAYERS); + + const float angleStep = 2.0f * static_cast(M_PI) / MULTI_RETURN_BEAM_VERTICES; + + const float hAngle = hCurrentDivergence * std::cos(static_cast(vertexIdx) * angleStep); + const float vAngle = vCurrentDivergence * std::sin(static_cast(vertexIdx) * angleStep); + + const auto rotation = Mat3x4f::rotationRad(vAngle, 0.0f, 0.0f) * Mat3x4f::rotationRad(0.0f, hAngle, 0.0f); + raysTf.emplace_back(rotation.toRGL()); + } + } + + // Scene + spawnCubeOnScene(Mat3x4f::TRS({0.0f, 0.0f, 0.0f}, {0.0f, 0.0f, 0.0f}, {6.0f, 6.0f, 6.0f})); + + // Camera + constructCameraGraph(Mat3x4f::identity().toRGL()); + + // Graph without MR + EXPECT_RGL_SUCCESS(rgl_node_rays_from_mat3x4f(&rays, raysTf.data(), raysTf.size())); + EXPECT_RGL_SUCCESS(rgl_node_rays_transform(&transform, &lidarPose)); + EXPECT_RGL_SUCCESS(rgl_node_raytrace(&raytrace, nullptr)); + EXPECT_RGL_SUCCESS(rgl_node_points_format(&format, fields.data(), fields.size())); + EXPECT_RGL_SUCCESS(rgl_node_points_ros2_publish(&publish, "MRTest_Lidar_Without_MR", 
"world")); + EXPECT_RGL_SUCCESS(rgl_graph_node_add_child(rays, transform)); + EXPECT_RGL_SUCCESS(rgl_graph_node_add_child(transform, raytrace)); + EXPECT_RGL_SUCCESS(rgl_graph_node_add_child(raytrace, format)); + EXPECT_RGL_SUCCESS(rgl_graph_node_add_child(format, publish)); + + ASSERT_RGL_SUCCESS(rgl_graph_run(cameraRays)); + ASSERT_RGL_SUCCESS(rgl_graph_run(rays)); +} +#endif diff --git a/test/src/graph/nodes/RadarTrackObjectsNodeTest.cpp b/test/src/graph/nodes/RadarTrackObjectsNodeTest.cpp new file mode 100644 index 000000000..dcf09c49c --- /dev/null +++ b/test/src/graph/nodes/RadarTrackObjectsNodeTest.cpp @@ -0,0 +1,579 @@ +#include +#include + +#include + + +class RadarTrackObjectsNodeTest : public RGLTest +{}; + +template +Type getRandomValue() +{ + static_assert(std::is_arithmetic_v, "Template arguments are not numbers."); + static std::uniform_real_distribution distribution(MinV, MaxV); + + const auto seed = std::random_device{}(); + std::mt19937 generator(seed); + + return distribution(generator); +} + +Vec3f getRandomVector() +{ + std::uniform_real_distribution<> distribution(0, 2 * M_PI); + + const auto seed = std::random_device{}(); + std::mt19937 generator(seed); + + double theta = distribution(generator), phi = distribution(generator); + return Vec3f{std::sin(theta) * std::cos(phi), std::sin(theta) * std::sin(phi), std::cos(theta)}; +} + +void generateDetectionFields(const Vec3f& clusterCenter, const Vec3f& clusterSpread, size_t clusterPointsCount, + Field::type clusterId, std::vector& xyz, std::vector& distance, + std::vector& azimuth, std::vector& elevation, std::vector& radialSpeed, + std::vector::type>& entityIds) +{ + const auto clusterXYZ = generateFieldValues(clusterPointsCount, genNormal); + for (const auto& detectionXYZ : clusterXYZ) { + const auto worldXYZ = detectionXYZ * clusterSpread + clusterCenter; + const auto worldSph = worldXYZ.toSpherical(); + + xyz.emplace_back(worldXYZ); + distance.emplace_back(worldSph[0]); + 
azimuth.emplace_back(worldSph[1]); + elevation.emplace_back(worldSph[2]); + radialSpeed.emplace_back(getRandomValue()); + entityIds.emplace_back(clusterId); + } +} + +void generateDetectionCluster(const Vec3f& clusterCenter, const Vec3f& clusterSpread, size_t clusterPointsCount, + Field::type clusterId, TestPointCloud& pointCloud) +{ + std::vector xyz; + std::vector distance, azimuth, elevation, radialSpeed; + std::vector::type> entityIds; + generateDetectionFields(clusterCenter, clusterSpread, clusterPointsCount, clusterId, xyz, distance, azimuth, elevation, + radialSpeed, entityIds); + + pointCloud.setFieldValues(xyz); + pointCloud.setFieldValues(distance); + pointCloud.setFieldValues(azimuth); + pointCloud.setFieldValues(elevation); + pointCloud.setFieldValues(radialSpeed); + pointCloud.setFieldValues(entityIds); +} + +void generateFixedDetectionClusters(TestPointCloud& pointCloud, size_t clusterCount, size_t clusterPointsCount) +{ + constexpr float centerScale = 10.0f; + const Vec3f clusterSpread = {1.0f}; + + std::vector xyz; + std::vector distance, azimuth, elevation, radialSpeed; + std::vector::type> entityIds; + + for (int i = 0; i < clusterCount; ++i) { + const auto angle = i * 2 * M_PI / static_cast(clusterCount); + const auto clusterCenter = Vec3f{std::cos(angle), std::sin(angle), 0.0f} * centerScale; + generateDetectionFields(clusterCenter, clusterSpread, clusterPointsCount, i, xyz, distance, azimuth, elevation, + radialSpeed, entityIds); + } + + pointCloud.setFieldValues(xyz); + pointCloud.setFieldValues(distance); + pointCloud.setFieldValues(azimuth); + pointCloud.setFieldValues(elevation); + pointCloud.setFieldValues(radialSpeed); + pointCloud.setFieldValues(entityIds); +} + +void generateRandomDetectionClusters(TestPointCloud& pointCloud, size_t clusterCount, size_t clusterPointsCount) +{ + constexpr float centerScale = 10.0f; + const Vec3f clusterSpread = {1.0f}; + const Vec3f centerOffset = Vec3f{20.0f, 0.0f, 0.0f}; + + std::vector xyz; + 
std::vector distance, azimuth, elevation, radialSpeed; + std::vector::type> entityIds; + + for (int i = 0; i < clusterCount; ++i) { + const auto clusterCenter = getRandomVector() * centerScale + centerOffset; + generateDetectionFields(clusterCenter, clusterSpread, clusterPointsCount, i, xyz, distance, azimuth, elevation, + radialSpeed, entityIds); + } + + pointCloud.setFieldValues(xyz); + pointCloud.setFieldValues(distance); + pointCloud.setFieldValues(azimuth); + pointCloud.setFieldValues(elevation); + pointCloud.setFieldValues(radialSpeed); + pointCloud.setFieldValues(entityIds); +} + +rgl_radar_object_class_t getObjectClass(const RadarTrackObjectsNode::ObjectState& objectState) +{ + if (objectState.classificationProbabilities.classCar > 0) { + return RGL_RADAR_CLASS_CAR; + } + if (objectState.classificationProbabilities.classTruck > 0) { + return RGL_RADAR_CLASS_TRUCK; + } + if (objectState.classificationProbabilities.classMotorcycle > 0) { + return RGL_RADAR_CLASS_MOTORCYCLE; + } + if (objectState.classificationProbabilities.classBicycle > 0) { + return RGL_RADAR_CLASS_BICYCLE; + } + if (objectState.classificationProbabilities.classPedestrian > 0) { + return RGL_RADAR_CLASS_PEDESTRIAN; + } + if (objectState.classificationProbabilities.classAnimal > 0) { + return RGL_RADAR_CLASS_ANIMAL; + } + if (objectState.classificationProbabilities.classHazard > 0) { + return RGL_RADAR_CLASS_HAZARD; + } + return RGL_RADAR_CLASS_UNKNOWN; +} + +TEST_F(RadarTrackObjectsNodeTest, objects_number_test) +{ + constexpr float distanceThreshold = 2.0f; + constexpr float azimuthThreshold = 0.5f; + constexpr float elevationThreshold = 0.5f; + constexpr float radialSpeedThreshold = 0.5f; + + constexpr float maxMatchingDistance = 1.0f; + constexpr float maxPredictionTimeFrame = 500.0f; + constexpr float movementSensitivity = 0.01; + + rgl_node_t trackObjectsNode = nullptr; + ASSERT_RGL_SUCCESS(rgl_node_points_radar_track_objects(&trackObjectsNode, distanceThreshold, azimuthThreshold, + 
elevationThreshold, radialSpeedThreshold, maxMatchingDistance, + maxPredictionTimeFrame, movementSensitivity)); + + constexpr size_t objectsCount = 5; + constexpr size_t detectionsCountPerObject = 10; + std::vector pointFields = Node::validatePtr(trackObjectsNode)->getRequiredFieldList(); + TestPointCloud inPointCloud(pointFields, objectsCount * detectionsCountPerObject); + generateFixedDetectionClusters(inPointCloud, objectsCount, detectionsCountPerObject); + + std::set::type> uniqueEntityIds; + for (auto entityId : inPointCloud.getFieldValues()) { + uniqueEntityIds.insert(entityId); + } + + std::vector::type> entityIds; + std::vector objectClasses; + for (auto entityId : uniqueEntityIds) { + // Object class is assigned in an arbitrary way. + entityIds.push_back(entityId); + objectClasses.push_back(static_cast(entityId % RGL_RADAR_CLASS_COUNT)); + } + ASSERT_RGL_SUCCESS( + rgl_node_points_radar_set_classes(trackObjectsNode, entityIds.data(), objectClasses.data(), entityIds.size())); + + const auto usePointsNode = inPointCloud.createUsePointsNode(); + ASSERT_RGL_SUCCESS(rgl_graph_node_add_child(usePointsNode, trackObjectsNode)); + ASSERT_RGL_SUCCESS(rgl_graph_run(trackObjectsNode)); + + // Verify objects count on the output. + int32_t detectedObjectsCount = 0, objectsSize = 0; + ASSERT_RGL_SUCCESS(rgl_graph_get_result_size(trackObjectsNode, XYZ_VEC3_F32, &detectedObjectsCount, &objectsSize)); + ASSERT_EQ(detectedObjectsCount, objectsCount); + + // Verify object classes - matching between entity ids and object classes is held internally but not exposed through public interface. + // Object states are not directly connected to entities - they have their own ids. These are the reasons behind the following checks. 
+ auto trackObjectsNodePtr = Node::validatePtr(trackObjectsNode); + const auto& objectStates = trackObjectsNodePtr->getObjectStates(); + std::unordered_map objectClassDeclaredCounts, objectClassDetectedCounts; + for (auto objectClass : objectClasses) { + ++objectClassDeclaredCounts[objectClass]; + } + for (const auto& objectState : objectStates) { + ++objectClassDetectedCounts[getObjectClass(objectState)]; + } + for (const auto& declaredClassCounts : objectClassDeclaredCounts) { + const auto it = objectClassDetectedCounts.find(declaredClassCounts.first); + ASSERT_TRUE(it != objectClassDetectedCounts.cend()); + ASSERT_EQ(it->second, declaredClassCounts.second); + } +} + +TEST_F(RadarTrackObjectsNodeTest, tracking_kinematic_object_test) +{ + constexpr float distanceThreshold = 2.0f; + constexpr float azimuthThreshold = 0.5f; + constexpr float elevationThreshold = 0.5f; + constexpr float radialSpeedThreshold = 0.5f; + + constexpr float maxMatchingDistance = 1.0f; + constexpr float maxPredictionTimeFrame = 500.0f; + constexpr float movementSensitivity = 0.01; + + rgl_node_t trackObjectsNode = nullptr; + ASSERT_RGL_SUCCESS(rgl_node_points_radar_track_objects(&trackObjectsNode, distanceThreshold, azimuthThreshold, + elevationThreshold, radialSpeedThreshold, maxMatchingDistance, + maxPredictionTimeFrame, movementSensitivity)); + + constexpr size_t detectionsCount = 10; + const Vec3f clusterSpread = {1.0f}; + const Vec3f& initialCloudTranslation = Vec3f{5.0f, -3.0f, 0.0f}; + const Vec3f iterationTranslation = Vec3f{0.0f, 0.1f, 0.0f}; + const uint64_t frameTimeNs = 5 * 1e6; // ms + + const int numberOfIterations = 60; + int iterationCounter = 0; + while (iterationCounter < numberOfIterations) { + auto trackObjectsNodePtr = Node::validatePtr(trackObjectsNode); + TestPointCloud inPointCloud(trackObjectsNodePtr->getRequiredFieldList(), detectionsCount); + generateDetectionCluster(initialCloudTranslation + static_cast(iterationCounter) * iterationTranslation, + clusterSpread, 
detectionsCount, 0, inPointCloud); + + auto usePointsNode = inPointCloud.createUsePointsNode(); + ASSERT_RGL_SUCCESS(rgl_graph_node_add_child(usePointsNode, trackObjectsNode)); + + ASSERT_RGL_SUCCESS(rgl_scene_set_time(nullptr, iterationCounter * frameTimeNs)); + ASSERT_RGL_SUCCESS(rgl_graph_run(trackObjectsNode)); + + ASSERT_RGL_SUCCESS(rgl_graph_node_remove_child(usePointsNode, trackObjectsNode)); + + { + const auto& objectStates = trackObjectsNodePtr->getObjectStates(); + ASSERT_EQ(objectStates.size(), + 1); // Only one group of detections is generated, and they are assumed to be part of the same object. + + const auto& checkedObjectState = objectStates.front(); + ASSERT_NEAR(checkedObjectState.lastMeasuredTime, 1e-6 * iterationCounter * frameTimeNs, 1e-6); + + if (iterationCounter > 0) { + ASSERT_EQ(checkedObjectState.objectStatus, RadarTrackObjectsNode::ObjectStatus::Measured); + ASSERT_EQ(checkedObjectState.movementStatus, RadarTrackObjectsNode::MovementStatus::Moved); + + const auto measuredVelocity = checkedObjectState.absVelocity.getLastSample(); + const auto appliedVelocity = 1e9f * Vec2f(iterationTranslation.x(), iterationTranslation.y()) / frameTimeNs; + ASSERT_NEAR((measuredVelocity - appliedVelocity).length(), 0.0f, 1e-3f); + ASSERT_NEAR(checkedObjectState.absAccel.getLastSample().length(), 0.0f, 0.1f); + + const auto measuredOrientation = checkedObjectState.orientation.getLastSample(); + const auto appliedOrientation = atan2(appliedVelocity.y(), appliedVelocity.x()); + ASSERT_NEAR(measuredOrientation, appliedOrientation, 1e-3f); + ASSERT_NEAR(checkedObjectState.orientationRate.getLastSample(), 0.0f, 0.1f); + } + } + + ++iterationCounter; + } +} + +#if RGL_BUILD_ROS2_EXTENSION +#include +TEST_F(RadarTrackObjectsNodeTest, creating_random_objects_test) +{ + GTEST_SKIP_("Debug test on development stage."); + + std::vector fields{XYZ_VEC3_F32}; + + constexpr float distanceThreshold = 2.0f; + constexpr float azimuthThreshold = 0.1f; + constexpr float 
elevationThreshold = 0.1f; + constexpr float radialSpeedThreshold = 0.5f; + + constexpr float maxMatchingDistance = 1.0f; + constexpr float maxPredictionTimeFrame = 500.0f; + constexpr float movementSensitivity = 0.01; + + size_t iterationCounter = 0; + while (true) { + // Setup objects tracking node + rgl_node_t trackObjectsNode = nullptr, ros2DetectionsNode = nullptr, ros2ObjectsNode = nullptr, + detectionsFormat = nullptr, objectsFormat = nullptr; + ASSERT_RGL_SUCCESS(rgl_node_points_radar_track_objects(&trackObjectsNode, distanceThreshold, azimuthThreshold, + elevationThreshold, radialSpeedThreshold, maxMatchingDistance, + maxPredictionTimeFrame, movementSensitivity)); + ASSERT_RGL_SUCCESS(rgl_node_points_ros2_publish(&ros2DetectionsNode, "radar_detections", "world")); + ASSERT_RGL_SUCCESS(rgl_node_points_ros2_publish(&ros2ObjectsNode, "radar_objects", "world")); + ASSERT_RGL_SUCCESS(rgl_node_points_format(&detectionsFormat, fields.data(), fields.size())); + ASSERT_RGL_SUCCESS(rgl_node_points_format(&objectsFormat, fields.data(), fields.size())); + + const size_t objectsCount = getRandomValue(); + const size_t detectionsCountPerObject = getRandomValue(); + std::vector pointFields = + Node::validatePtr(trackObjectsNode)->getRequiredFieldList(); + TestPointCloud inPointCloud(pointFields, objectsCount * detectionsCountPerObject); + + generateRandomDetectionClusters(inPointCloud, objectsCount, detectionsCountPerObject); + + const auto usePointsNode = inPointCloud.createUsePointsNode(); + ASSERT_RGL_SUCCESS(rgl_graph_node_add_child(usePointsNode, trackObjectsNode)); + + ASSERT_RGL_SUCCESS(rgl_graph_node_add_child(usePointsNode, detectionsFormat)); + ASSERT_RGL_SUCCESS(rgl_graph_node_add_child(detectionsFormat, ros2DetectionsNode)); + + ASSERT_RGL_SUCCESS(rgl_graph_node_add_child(trackObjectsNode, objectsFormat)); + ASSERT_RGL_SUCCESS(rgl_graph_node_add_child(objectsFormat, ros2ObjectsNode)); + + ASSERT_RGL_SUCCESS(rgl_graph_run(trackObjectsNode)); + 
std::this_thread::sleep_for(std::chrono::milliseconds(1000)); + + int32_t detectedObjectsCount = 0, objectsSize = 0; + rgl_graph_get_result_size(trackObjectsNode, fields.at(0), &detectedObjectsCount, &objectsSize); + + if (detectedObjectsCount != objectsCount) { + printf("[%lu] Detected / given objects: %d / %lu\n", iterationCounter++, detectedObjectsCount, objectsCount); + } + + EXPECT_RGL_SUCCESS(rgl_cleanup()); + } +} + +TEST_F(RadarTrackObjectsNodeTest, tracking_objects_test) +{ + GTEST_SKIP_("Debug test on development stage."); + + std::vector detectionFields{XYZ_VEC3_F32}; + std::vector objectFields{XYZ_VEC3_F32, ENTITY_ID_I32}; + + constexpr float distanceThreshold = 2.0f; + constexpr float azimuthThreshold = 0.1f; + constexpr float elevationThreshold = 0.1f; + constexpr float radialSpeedThreshold = 0.5f; + + constexpr float maxMatchingDistance = 1.0f; + constexpr float maxPredictionTimeFrame = 500.0f; + constexpr float movementSensitivity = 0.01; + + // Setup objects tracking node + rgl_node_t trackObjectsNode = nullptr, ros2DetectionsNode = nullptr, ros2ObjectsNode = nullptr, detectionsFormat = nullptr, + objectsFormat = nullptr; + ASSERT_RGL_SUCCESS(rgl_node_points_radar_track_objects(&trackObjectsNode, distanceThreshold, azimuthThreshold, + elevationThreshold, radialSpeedThreshold, maxMatchingDistance, + maxPredictionTimeFrame, movementSensitivity)); + ASSERT_RGL_SUCCESS(rgl_node_points_ros2_publish(&ros2DetectionsNode, "radar_detections", "world")); + ASSERT_RGL_SUCCESS(rgl_node_points_ros2_publish(&ros2ObjectsNode, "radar_objects", "world")); + ASSERT_RGL_SUCCESS(rgl_node_points_format(&detectionsFormat, detectionFields.data(), detectionFields.size())); + ASSERT_RGL_SUCCESS(rgl_node_points_format(&objectsFormat, objectFields.data(), objectFields.size())); + + constexpr size_t objectsCount = 5; + constexpr size_t detectionsCountPerObject = 10; + std::vector pointFields = Node::validatePtr(trackObjectsNode)->getRequiredFieldList(); + TestPointCloud 
inPointCloud(pointFields, objectsCount * detectionsCountPerObject); + + generateFixedDetectionClusters(inPointCloud, objectsCount, detectionsCountPerObject); + + auto usePointsNode = inPointCloud.createUsePointsNode(); + ASSERT_RGL_SUCCESS(rgl_graph_node_add_child(usePointsNode, trackObjectsNode)); + + ASSERT_RGL_SUCCESS(rgl_graph_node_add_child(usePointsNode, detectionsFormat)); + ASSERT_RGL_SUCCESS(rgl_graph_node_add_child(detectionsFormat, ros2DetectionsNode)); + + ASSERT_RGL_SUCCESS(rgl_graph_node_add_child(trackObjectsNode, objectsFormat)); + ASSERT_RGL_SUCCESS(rgl_graph_node_add_child(objectsFormat, ros2ObjectsNode)); + + const uint64_t frameTime = 5 * 1e6; // ms + int iterationCounter = 0; + while (true) { + ASSERT_RGL_SUCCESS(rgl_scene_set_time(nullptr, iterationCounter * frameTime)); + ASSERT_RGL_SUCCESS(rgl_graph_run(trackObjectsNode)); + + std::this_thread::sleep_for(std::chrono::milliseconds(1000)); + + rgl_graph_node_remove_child(usePointsNode, trackObjectsNode); + rgl_graph_node_remove_child(usePointsNode, detectionsFormat); + + inPointCloud.transform(Mat3x4f::rotationDeg(0.0f, 0.0f, 5.0f)); + usePointsNode = inPointCloud.createUsePointsNode(); + + ASSERT_RGL_SUCCESS(rgl_graph_node_add_child(usePointsNode, trackObjectsNode)); + ASSERT_RGL_SUCCESS(rgl_graph_node_add_child(usePointsNode, detectionsFormat)); + + ++iterationCounter; + } +} + +#include +#include +#include +#include + +TEST_F(RadarTrackObjectsNodeTest, tracking_object_with_radar_postprocess_input_test) +{ + GTEST_SKIP_("Debug test on development stage."); + + // Radar rays parameters + constexpr auto minAzimuth = -90.0f; + constexpr auto maxAzimuth = 90.0f; + constexpr auto minElevation = -90.0f; + constexpr auto maxElevation = 90.0f; + constexpr auto azimuthStep = 0.49f; + constexpr auto elevationStep = 0.49f; + + constexpr PostProcessNodeParams radarPostProcessParams{ + .scope = {.begin_distance = 1.0f, + .end_distance = 10.0f, + .distance_separation_threshold = 0.3f, + 
.radial_speed_separation_threshold = 0.3f, + .azimuth_separation_threshold = 8.0f * (std::numbers::pi_v / 180.0f)}, + .radarScopesCount = 1, + .azimuthStepRad = azimuthStep * (std::numbers::pi_v / 180.0f), + .elevationStepRad = elevationStep * (std::numbers::pi_v / 180.0f), + .frequencyHz = 79E9f, + .powerTransmittedDdm = 31.0f, + .antennaGainDbi = 27.0f, + .noiseMean = 60.0f, + .noiseStdDev = 1.0f, + }; + + constexpr TrackObjectNodeParams trackingParams{ + .distanceThreshold = 2.0f, + .azimuthThreshold = 0.5f, + .elevationThreshold = 0.5f, + .radialSpeedThreshold = 0.5f, + .maxMatchingDistance = 1.0f, + .maxPredictionTimeFrame = 1.0f, + .movementSensitivity = 0.01, + }; + + // Scene + rgl_mesh_t reflector2dMesh = loadFromSTL(std::filesystem::path(RGL_TEST_DATA_DIR) / "reflector2d.stl"); + const std::vector::type> entityIds{1}; + rgl_entity_t entity = nullptr; + const auto entityTransl = Vec3f{5.0f, 0.0f, 0.0f}; + const auto entityPose = Mat3x4f::translation(entityTransl).toRGL(); + + ASSERT_RGL_SUCCESS(rgl_entity_create(&entity, nullptr, reflector2dMesh)); + ASSERT_RGL_SUCCESS(rgl_entity_set_id(entity, entityIds.at(0))); + ASSERT_RGL_SUCCESS(rgl_entity_set_pose(entity, &entityPose)); + + // Radar rays + const std::vector radarRays = genRadarRays(minAzimuth, maxAzimuth, minElevation, maxElevation, azimuthStep, + elevationStep); + const Vec3f radarRayTransl = Vec3f{0.0f}; + const rgl_mat3x4f radarRayTf = Mat3x4f::translation(radarRayTransl).toRGL(); + + // Radar track objects graph + rgl_node_t postProcessNode = nullptr, trackObjectsNode = nullptr; + const std::vector objectClasses{ + static_cast(entityIds.at(0) % RGL_RADAR_CLASS_COUNT)}; + constructRadarPostProcessObjectTrackingGraph(radarRays, radarRayTf, postProcessNode, radarPostProcessParams, + trackObjectsNode, trackingParams, entityIds, objectClasses, true); + + // Auxiliary lidar graph for imaging entities on the scene + const Vec3f cameraTransl = radarRayTransl + Vec3f{-2.0f, 0.0f, 0.0f}; + const 
rgl_mat3x4f cameraPose = Mat3x4f::translation(cameraTransl).toRGL(); + rgl_node_t cameraRays = constructCameraGraph(cameraPose); + + constexpr uint64_t frameTime = 5 * 1e6; // ms + int iterationCounter = 0; + while (true) { + ASSERT_RGL_SUCCESS(rgl_scene_set_time(nullptr, iterationCounter * frameTime)); + + ASSERT_RGL_SUCCESS(rgl_graph_run(cameraRays)); + ASSERT_RGL_SUCCESS(rgl_graph_run(trackObjectsNode)); + + // Verify objects count on the output - only one object is expected + int32_t detectedObjectsCount = 0, objectsSize = 0; + ASSERT_RGL_SUCCESS(rgl_graph_get_result_size(trackObjectsNode, XYZ_VEC3_F32, &detectedObjectsCount, &objectsSize)); + + int32_t expectedObjectsCount = 1; + ASSERT_EQ(detectedObjectsCount, expectedObjectsCount); + + std::this_thread::sleep_for(std::chrono::milliseconds(1000)); + + ++iterationCounter; + } +} + +TEST_F(RadarTrackObjectsNodeTest, tracking_kinematic_object_with_radar_postprocess_input_test) +{ + GTEST_SKIP_("Debug test on development stage."); + + // Radar rays parameters + constexpr auto minAzimuth = -90.0f; + constexpr auto maxAzimuth = 90.0f; + constexpr auto minElevation = -90.0f; + constexpr auto maxElevation = 90.0f; + constexpr auto azimuthStep = 0.49f; + constexpr auto elevationStep = 0.49f; + + constexpr PostProcessNodeParams radarPostProcessParams{ + .scope = {.begin_distance = 15.0f, + .end_distance = 30.0f, + .distance_separation_threshold = 0.3f, + .radial_speed_separation_threshold = 0.3f, + .azimuth_separation_threshold = 8.0f * (std::numbers::pi_v / 180.0f)}, + .radarScopesCount = 1, + .azimuthStepRad = azimuthStep * (std::numbers::pi_v / 180.0f), + .elevationStepRad = elevationStep * (std::numbers::pi_v / 180.0f), + .frequencyHz = 79E9f, + .powerTransmittedDdm = 31.0f, + .antennaGainDbi = 27.0f, + .noiseMean = 60.0f, + .noiseStdDev = 1.0f, + }; + + constexpr TrackObjectNodeParams trackingParams{ + .distanceThreshold = 2.0f, + .azimuthThreshold = 0.5f, + .elevationThreshold = 0.5f, + .radialSpeedThreshold = 
0.5f, + .maxMatchingDistance = 1.0f, + .maxPredictionTimeFrame = 1.0f, + .movementSensitivity = 0.01, + }; + + // Scene + rgl_mesh_t reflector2dMesh = loadFromSTL(std::filesystem::path(RGL_TEST_DATA_DIR) / "reflector2d.stl"); + const std::vector::type> entityIds{1}; + rgl_entity_t entity = nullptr; + + ASSERT_RGL_SUCCESS(rgl_entity_create(&entity, nullptr, reflector2dMesh)); + ASSERT_RGL_SUCCESS(rgl_entity_set_id(entity, entityIds.at(0))); + + // Radar rays + const std::vector radarRays = genRadarRays(minAzimuth, maxAzimuth, minElevation, maxElevation, azimuthStep, + elevationStep); + const Vec3f radarRayTransl = Vec3f{0.0f}; + const rgl_mat3x4f radarRayTf = Mat3x4f::translation(radarRayTransl).toRGL(); + + // Radar track objects graph + rgl_node_t postProcessNode = nullptr, trackObjectsNode = nullptr; + const std::vector objectClasses{ + static_cast(entityIds.at(0) % RGL_RADAR_CLASS_COUNT)}; + constructRadarPostProcessObjectTrackingGraph(radarRays, radarRayTf, postProcessNode, radarPostProcessParams, + trackObjectsNode, trackingParams, entityIds, objectClasses, true); + + // Auxiliary lidar graph for imaging entities on the scene + const Vec3f cameraTransl = radarRayTransl + Vec3f{0.0f, 0.0f, 2.0f}; + const rgl_mat3x4f cameraPose = Mat3x4f::translation(cameraTransl).toRGL(); + rgl_node_t cameraRays = constructCameraGraph(cameraPose); + + // Kinematic object parameters + constexpr float maxObjectRadarDistance = radarPostProcessParams.scope.end_distance + 2.0f; + constexpr float minObjectRadarDistance = radarPostProcessParams.scope.begin_distance - 2.0f; + constexpr float amplitude = (maxObjectRadarDistance - minObjectRadarDistance) / 2.0f; + constexpr float shift = (maxObjectRadarDistance + minObjectRadarDistance) / 2.0f; + + constexpr uint64_t frameTime = 5 * 1e6; // ms + int iterationCounter = 0; + while (true) { + const auto entityTransl = radarRayTransl + + Vec3f{amplitude * std::sin(static_cast(iterationCounter) * 0.1f) + shift, 0.0f, 0.0f}; + const auto 
entityPose = Mat3x4f::translation(entityTransl).toRGL(); + ASSERT_RGL_SUCCESS(rgl_entity_set_pose(entity, &entityPose)); + + ASSERT_RGL_SUCCESS(rgl_scene_set_time(nullptr, iterationCounter * frameTime)); + + ASSERT_RGL_SUCCESS(rgl_graph_run(postProcessNode)); + ASSERT_RGL_SUCCESS(rgl_graph_run(cameraRays)); + + std::this_thread::sleep_for(std::chrono::milliseconds(1000)); + + ++iterationCounter; + } +} +#endif diff --git a/test/src/scene/radarTest.cpp b/test/src/scene/radarTest.cpp index 89aba4282..16b08a085 100644 --- a/test/src/scene/radarTest.cpp +++ b/test/src/scene/radarTest.cpp @@ -24,14 +24,15 @@ struct numpunct : std::numpunct }; struct RadarTest : RGLTest -{}; - -const auto minAzimuth = -65.0f; -const auto maxAzimuth = 65.0f; -const auto minElevation = -7.5f; -const auto maxElevation = 7.5f; -const auto azimuthStep = 0.49f; -const auto elevationStep = 0.49f; +{ +protected: + const float minAzimuth = -65.0f; + const float maxAzimuth = 65.0f; + const float minElevation = -7.5f; + const float maxElevation = 7.5f; + const float azimuthStep = 0.49f; + const float elevationStep = 0.49f; +}; rgl_mesh_t getFlatPlate(float dim) { @@ -59,37 +60,17 @@ rgl_mesh_t getFlatPlate(float dim) return outMesh; } -std::vector genRadarRays() -{ - std::vector rays; - for (auto a = minAzimuth; a <= maxAzimuth; a += azimuthStep) { - for (auto e = minElevation; e <= maxElevation; e += elevationStep) { - // By default, the rays are directed along the Z-axis - // So first, we rotate them around the Y-axis to point towards the X-axis (to be RVIZ2 compatible) - // Then, rotation around Z is azimuth, around Y is elevation - const auto ray = Mat3x4f::rotationDeg(a, e, 0); - rays.emplace_back(ray.toRGL()); - - // The above will have to be modified again - we assume that target is farther in X axis when in fact - // we use Z as RGL LiDAR front. Remember to update. 
- - const auto rayDir = ray * Vec3f{0, 0, 1}; - //printf("rayDir: %.2f %.2f %.2f\n", rayDir.x(), rayDir.y(), rayDir.z()); - } - } - - return rays; -} - namespace fs = std::filesystem; using namespace std::chrono_literals; +#include + TEST_F(RadarTest, rotating_reflector_2d) { GTEST_SKIP(); // Setup sensor and graph - std::vector raysData = genRadarRays(); + std::vector raysData = genRadarRays(minAzimuth, maxAzimuth, minElevation, maxElevation, azimuthStep, elevationStep); // std::vector raysData = { // Mat3x4f::rotationDeg(0, 0, 0).toRGL(), //// Mat3x4f::rotationDeg(2.5, 0, 0).toRGL(), @@ -111,7 +92,7 @@ TEST_F(RadarTest, rotating_reflector_2d) lidarPublish = nullptr; rgl_node_t raysTransform = nullptr; - auto raycasterTransform = Mat3x4f::rotationDeg(0.0f, 90.0f, 0.0f).toRGL(); + auto raycasterTransform = Mat3x4f::identity().toRGL(); EXPECT_RGL_SUCCESS(rgl_node_rays_from_mat3x4f(&rays, raysData.data(), raysData.size())); EXPECT_RGL_SUCCESS(rgl_node_rays_transform(&raysTransform, &raycasterTransform));