diff --git a/detection_6d_foundationpose/include/detection_6d_foundationpose/foundationpose.hpp b/detection_6d_foundationpose/include/detection_6d_foundationpose/foundationpose.hpp
index c10726b..d47643d 100644
--- a/detection_6d_foundationpose/include/detection_6d_foundationpose/foundationpose.hpp
+++ b/detection_6d_foundationpose/include/detection_6d_foundationpose/foundationpose.hpp
@@ -29,6 +29,7 @@ class Base6DofDetectionModel {
    * @param mask Object mask (CV_8UC1 format, positive pixels > 0)
    * @param target_name Object category name (must match construction mapping)
    * @param out_pose_in_mesh Output pose in mesh coordinate frame
+   * @param refine_itr Number of refinement iterations
    * @return true Registration successful
    * @return false Registration failed
    */
@@ -36,7 +37,8 @@ class Base6DofDetectionModel {
                         const cv::Mat &depth,
                         const cv::Mat &mask,
                         const std::string &target_name,
-                        Eigen::Matrix4f &out_pose_in_mesh) = 0;
+                        Eigen::Matrix4f &out_pose_in_mesh,
+                        size_t refine_itr = 1) = 0;
 
   /**
    * @brief Track object pose from subsequent frames (lightweight version of Register)
@@ -50,6 +52,7 @@ class Base6DofDetectionModel {
    * @param hyp_pose_in_mesh Hypothesis pose in mesh frame (from Register or other sources)
    * @param target_name Object category name (must match construction mapping)
    * @param out_pose_in_mesh Output pose in mesh coordinate frame
+   * @param refine_itr Number of refinement iterations
    * @return true Tracking successful
    * @return false Tracking failed
    */
@@ -57,7 +60,8 @@ class Base6DofDetectionModel {
                      const cv::Mat &depth,
                      const Eigen::Matrix4f &hyp_pose_in_mesh,
                      const std::string &target_name,
-                     Eigen::Matrix4f &out_pose_in_mesh) = 0;
+                     Eigen::Matrix4f &out_pose_in_mesh,
+                     size_t refine_itr = 1) = 0;
 
   /**
    * @brief Virtual destructor for proper resource cleanup
diff --git a/detection_6d_foundationpose/src/foundationpose.cpp b/detection_6d_foundationpose/src/foundationpose.cpp
index 268be2e..c42cc9a 100644
--- a/detection_6d_foundationpose/src/foundationpose.cpp
+++ b/detection_6d_foundationpose/src/foundationpose.cpp
@@ -33,20 +33,21 @@ class FoundationPose : public Base6DofDetectionModel {
                  const int max_input_image_W = 1920,
                  const int crop_window_H = 160,
                  const int crop_window_W = 160,
-                 const float min_depth = 0.1,
-                 const float max_depth = 4.0);
+                 const float min_depth = 0.001);
 
   bool Register(const cv::Mat &rgb,
                 const cv::Mat &depth,
                 const cv::Mat &mask,
                 const std::string &target_name,
-                Eigen::Matrix4f &out_pose_in_mesh) override;
+                Eigen::Matrix4f &out_pose_in_mesh,
+                size_t refine_itr = 1) override;
 
   bool Track(const cv::Mat &rgb,
              const cv::Mat &depth,
              const Eigen::Matrix4f &hyp_pose_in_mesh,
              const std::string &target_name,
-             Eigen::Matrix4f &out_pose_in_mesh) override;
+             Eigen::Matrix4f &out_pose_in_mesh,
+             size_t refine_itr = 1) override;
 
 private:
   bool CheckInputArguments(const cv::Mat &rgb,
@@ -54,18 +55,22 @@ class FoundationPose : public Base6DofDetectionModel {
                            const cv::Mat &mask,
                            const std::string &target_name);
 
-  bool UploadDataToDevice(const cv::Mat &rgb,
-                          const cv::Mat &depth,
-                          const cv::Mat &mask,
-                          const std::shared_ptr<async_pipeline::IPipelinePackage> &package);
+  using ParsingType = std::unique_ptr<FoundationPosePipelinePackage>;
 
-  bool RefinePreProcess(std::shared_ptr<async_pipeline::IPipelinePackage> package);
+  bool UploadDataToDevice(const cv::Mat &rgb,
+                          const cv::Mat &depth,
+                          const cv::Mat &mask,
+                          const ParsingType &package);
 
-  bool ScorePreprocess(std::shared_ptr<async_pipeline::IPipelinePackage> package);
+  bool RefinePreProcess(const ParsingType &package);
 
-  bool ScorePostProcess(std::shared_ptr<async_pipeline::IPipelinePackage> package);
+  bool RefinePostProcess(const ParsingType &package);
 
-  bool TrackPostProcess(std::shared_ptr<async_pipeline::IPipelinePackage> package);
+  bool ScorePreprocess(const ParsingType &package);
+
+  bool ScorePostProcess(const ParsingType &package);
+
+  bool TrackPostProcess(const ParsingType &package);
 
 private:
   // 以下参数不对外开放
@@ -77,10 +82,10 @@ class FoundationPose : public Base6DofDetectionModel {
   const float REFINE_ROT_NORMALIZER = 0.349065850398865;
   const std::string SCORE_OUTPUT_BLOB_NAME = "scores";
   // render参数
-  const int score_mode_poses_num_ = 252;
-  const int refine_mode_poses_num_ = 1;
-  const float refine_mode_crop_ratio = 1.2;
-  const float score_mode_crop_ratio = 1.1;
+  const int score_mode_poses_num_ = 252;
+  const int refine_mode_poses_num_ = 1;
+  const float refine_mode_crop_ratio_ = 1.2;
+  const float score_mode_crop_ratio_ = 1.1;
 
 private:
   // 以下参数对外开放,通过构造函数传入
@@ -108,8 +113,7 @@ FoundationPose::FoundationPose(std::shared_ptr
                                const int max_input_image_W,
                                const int crop_window_H,
                                const int crop_window_W,
-                               const float min_depth,
-                               const float max_depth)
+                               const float min_depth)
     : refiner_core_(refiner_core),
      scorer_core_(scorer_core),
      intrinsic_(intrinsic),
@@ -152,13 +156,13 @@ FoundationPose::FoundationPose(std::shared_ptr
   {
     const std::string &target_name = mesh_loader->GetName();
     LOG(INFO) << "[FoundationPose] Got target_name : " << target_name;
-    map_name2loaders_[target_name] = mesh_loader;
-    map_name2renderer_[target_name] = std::make_shared<FoundationPoseRenderer>(
-        mesh_loader, intrinsic_, score_mode_poses_num_, refine_mode_crop_ratio);
+    map_name2loaders_[target_name]  = mesh_loader;
+    map_name2renderer_[target_name] =
+        std::make_shared<FoundationPoseRenderer>(mesh_loader, intrinsic_, score_mode_poses_num_);
   }
 
   hyp_poses_sampler_ = std::make_shared<FoundationPoseSampler>(
-      max_input_image_H_, max_input_image_W_, min_depth, max_depth, intrinsic_);
+      max_input_image_H_, max_input_image_W_, min_depth, intrinsic_);
 }
 
 bool FoundationPose::CheckInputArguments(const cv::Mat &rgb,
@@ -191,12 +195,13 @@ bool FoundationPose::Register(const cv::Mat &rgb,
                               const cv::Mat &depth,
                               const cv::Mat &mask,
                               const std::string &target_name,
-                              Eigen::Matrix4f &out_pose_in_mesh)
+                              Eigen::Matrix4f &out_pose_in_mesh,
+                              size_t refine_itr)
 {
   CHECK_STATE(CheckInputArguments(rgb, depth, mask, target_name),
              "[FoundationPose] `Register` Got invalid arguments!!!");
 
-  auto package = std::make_shared<FoundationPosePipelinePackage>();
+  auto package = std::make_unique<FoundationPosePipelinePackage>();
   package->rgb_on_host = rgb;
   package->depth_on_host = depth;
   package->mask_on_host = mask;
@@ -205,19 +210,23 @@ bool FoundationPose::Register(const cv::Mat &rgb,
   MESSURE_DURATION_AND_CHECK_STATE(UploadDataToDevice(rgb, depth, mask, package),
                                    "[FoundationPose] SyncDetect Failed to upload data!!!");
 
-  MESSURE_DURATION_AND_CHECK_STATE(
-      RefinePreProcess(package),
-      "[FoundationPose] SyncDetect Failed to execute RefinePreProcess!!!");
+  for (size_t i = 0; i < refine_itr; ++i) {
+    MESSURE_DURATION_AND_CHECK_STATE(
+        RefinePreProcess(package),
+        "[FoundationPose] SyncDetect Failed to execute RefinePreProcess!!!");
 
-  // package->infer_buffer = package->refiner_blobs_buffer;
-  MESSURE_DURATION_AND_CHECK_STATE(
-      refiner_core_->SyncInfer(package->GetInferBuffer()),
-      "[FoundationPose] SyncDetect Failed to execute refiner_core_->SyncInfer!!!");
+    MESSURE_DURATION_AND_CHECK_STATE(
+        refiner_core_->SyncInfer(package->GetInferBuffer()),
+        "[FoundationPose] SyncDetect Failed to execute refiner_core_->SyncInfer!!!");
+
+    MESSURE_DURATION_AND_CHECK_STATE(
+        RefinePostProcess(package),
+        "[FoundationPose] SyncDetect Failed to execute RefinePostProcess!!!");
+  }
 
   MESSURE_DURATION_AND_CHECK_STATE(
       ScorePreprocess(package),
      "[FoundationPose] SyncDetect Failed to execute ScorePreprocess!!!");
 
-  // unit_buffer->p_blob_buffers = package->scorer_blobs_buffer;
   MESSURE_DURATION_AND_CHECK_STATE(
       scorer_core_->SyncInfer(package->GetInferBuffer()),
       "[FoundationPose] SyncDetect Failed to execute scorer_core_->SyncInfer!!!");
@@ -234,12 +243,13 @@ bool FoundationPose::Track(const cv::Mat &rgb,
                            const cv::Mat &depth,
                            const Eigen::Matrix4f &hyp_pose_in_mesh,
                            const std::string &target_name,
-                           Eigen::Matrix4f &out_pose_in_mesh)
+                           Eigen::Matrix4f &out_pose_in_mesh,
+                           size_t refine_itr)
 {
   CHECK_STATE(CheckInputArguments(rgb, depth, cv::Mat(), target_name),
              "[FoundationPose] `Track` Got invalid arguments!!!");
 
-  auto package = std::make_shared<FoundationPosePipelinePackage>();
+  auto package = std::make_unique<FoundationPosePipelinePackage>();
   package->rgb_on_host = rgb;
   package->depth_on_host = depth;
   package->target_name = target_name;
@@ -248,26 +258,27 @@ bool FoundationPose::Track(const cv::Mat &rgb,
   MESSURE_DURATION_AND_CHECK_STATE(UploadDataToDevice(rgb, depth, cv::Mat(), package),
                                    "[FoundationPose] Track Failed to upload data!!!");
 
-  MESSURE_DURATION_AND_CHECK_STATE(RefinePreProcess(package),
-                                   "[FoundationPose] Track Failed to execute RefinePreProcess!!!");
+  for (size_t i = 0; i < refine_itr; ++i) {
+    MESSURE_DURATION_AND_CHECK_STATE(RefinePreProcess(package),
+                                     "[FoundationPose] Track Failed to execute RefinePreProcess!!!");
 
-  MESSURE_DURATION_AND_CHECK_STATE(
-      refiner_core_->SyncInfer(package->GetInferBuffer()),
-      "[FoundationPose] Track Failed to execute refiner_core_->SyncInfer!!!");
+    MESSURE_DURATION_AND_CHECK_STATE(
+        refiner_core_->SyncInfer(package->GetInferBuffer()),
+        "[FoundationPose] Track Failed to execute refiner_core_->SyncInfer!!!");
 
-  MESSURE_DURATION_AND_CHECK_STATE(TrackPostProcess(package),
-                                   "[Foundation] Track Failed to execute `TrackPostProcess`!!!");
+    MESSURE_DURATION_AND_CHECK_STATE(RefinePostProcess(package),
+                                     "[FoundationPose] Track Failed to execute `RefinePostProcess`!!!");
+  }
 
-  out_pose_in_mesh = std::move(package->actual_pose);
+  out_pose_in_mesh = std::move(package->hyp_poses[0]);
 
   return true;
 }
 
-bool FoundationPose::UploadDataToDevice(
-    const cv::Mat &rgb,
-    const cv::Mat &depth,
-    const cv::Mat &mask,
-    const std::shared_ptr<async_pipeline::IPipelinePackage> &package)
+bool FoundationPose::UploadDataToDevice(const cv::Mat &rgb,
+                                        const cv::Mat &depth,
+                                        const cv::Mat &mask,
+                                        const ParsingType &package)
 {
   const int input_image_height = rgb.rows, input_image_width = rgb.cols;
   package->input_image_height = input_image_height;
@@ -297,7 +308,7 @@ bool FoundationPose::UploadDataToDevice(
   convert_depth_to_xyz_map(static_cast<float *>(depth_on_device), input_image_height,
                            input_image_width, static_cast<float *>(xyz_map_on_device),
                            intrinsic_(0, 0), intrinsic_(1, 1), intrinsic_(0, 2), intrinsic_(1, 2),
-                           0.1);
+                           0.001);
 
   // 输出device端指针,并注册析构过程
   auto func_release_cuda_buffer = [](void *ptr) {
@@ -314,11 +325,8 @@ bool FoundationPose::UploadDataToDevice(
   return true;
 }
 
-bool FoundationPose::RefinePreProcess(std::shared_ptr<async_pipeline::IPipelinePackage> _package)
+bool FoundationPose::RefinePreProcess(const ParsingType &package)
 {
-  auto package = std::dynamic_pointer_cast<FoundationPosePipelinePackage>(_package);
-  CHECK_STATE(package != nullptr, "[FoundationPose] RefinePreProcess Got INVALID package ptr!!!");
-
   // 1. sample
   if (package->hyp_poses.empty())
   {
@@ -329,17 +337,22 @@ bool FoundationPose::RefinePreProcess(std::shared_ptr<async_pipeline::IPipelinePackage> _package)
-  auto &refine_renderer = map_name2renderer_[package->target_name];
-  auto refiner_blob_buffer = refiner_core_->GetBuffer(false);
+  if (package->refiner_blobs_buffer == nullptr) {
+    package->refiner_blobs_buffer = refiner_core_->GetBuffer(true);
+  }
+  const auto &refiner_blob_buffer = package->refiner_blobs_buffer;
   // 设置推理前blob的输入位置为device,输出的blob位置为host端
   refiner_blob_buffer->SetBlobBuffer(RENDER_INPUT_BLOB_NAME, DataLocation::DEVICE);
   refiner_blob_buffer->SetBlobBuffer(TRANSF_INPUT_BLOB_NAME, DataLocation::DEVICE);
+
+  auto &refine_renderer = map_name2renderer_[package->target_name];
   CHECK_STATE(
       refine_renderer->RenderAndTransform(
           package->hyp_poses, package->rgb_on_device.get(), package->depth_on_device.get(),
           package->xyz_map_on_device.get(), package->input_image_height, package->input_image_width,
           refiner_blob_buffer->GetOuterBlobBuffer(RENDER_INPUT_BLOB_NAME).first,
-          refiner_blob_buffer->GetOuterBlobBuffer(TRANSF_INPUT_BLOB_NAME).first),
+          refiner_blob_buffer->GetOuterBlobBuffer(TRANSF_INPUT_BLOB_NAME).first,
+          refine_mode_crop_ratio_),
       "[FoundationPose] Failed to render and transform !!!");
   // 3. 设置推理时形状
   const int input_poses_num = package->hyp_poses.size();
@@ -347,22 +360,21 @@
   refiner_blob_buffer->SetBlobShape(TRANSF_INPUT_BLOB_NAME,
                                     {input_poses_num, crop_window_H_, crop_window_W_, 6});
 
-  package->refiner_blobs_buffer = refiner_blob_buffer;
   package->infer_buffer = refiner_blob_buffer;
   return true;
 }
 
-bool FoundationPose::ScorePreprocess(std::shared_ptr<async_pipeline::IPipelinePackage> _package)
+bool FoundationPose::RefinePostProcess(const ParsingType &package)
 {
-  auto package = std::dynamic_pointer_cast<FoundationPosePipelinePackage>(_package);
-  CHECK_STATE(package != nullptr, "[FoundationPose] ScorePreprocess Got INVALID package ptr!!!");
   // 获取refiner模型的缓存指针
   const auto &refiner_blob_buffer = package->refiner_blobs_buffer;
   const auto _trans_ptr = refiner_blob_buffer->GetOuterBlobBuffer(REFINE_TRANS_OUT_BLOB_NAME).first;
   const auto _rot_ptr = refiner_blob_buffer->GetOuterBlobBuffer(REFINE_ROT_OUT_BLOB_NAME).first;
   const float *trans_ptr = static_cast<const float *>(_trans_ptr);
   const float *rot_ptr = static_cast<const float *>(_rot_ptr);
+  CHECK_STATE(trans_ptr != nullptr, "[FoundationPose] RefinePostProcess got invalid trans_ptr !");
+  CHECK_STATE(rot_ptr != nullptr, "[FoundationPose] RefinePostProcess got invalid rot_ptr !");
 
   // 获取生成的假设位姿
   const auto &hyp_poses = package->hyp_poses;
@@ -372,10 +384,12 @@ bool FoundationPose::ScorePreprocess(std::shared_ptr<async_pipeline::IPipelinePackage> _package)
   const auto &mesh_loader = map_name2loaders_[package->target_name];
 
   // transformation 将模型输出的相对位姿转换为绝对位姿
-  const float mesh_diameter = mesh_loader->GetMeshDiameter();
+  const float mesh_diameter = mesh_loader->GetMeshDiameter();
+
   std::vector<Eigen::Vector3f> trans_delta(poses_num);
   std::vector<Eigen::Vector3f> rot_delta(poses_num);
   std::vector<Eigen::Matrix3f> rot_mat_delta(poses_num);
+
   for (int i = 0; i < poses_num; ++i)
   {
     const size_t offset = i * 3;
@@ -399,6 +413,12 @@ bool FoundationPose::ScorePreprocess(std::shared_ptr<async_pipeline::IPipelinePackage> _package)
     Eigen::Matrix3f result_3x3 = rot_mat_delta[i] * top_left_3x3;
     refine_poses[i].block<3, 3>(0, 0) = result_3x3;
   }
 
+  package->hyp_poses = std::move(refine_poses);
+  return true;
+}
+
+bool FoundationPose::ScorePreprocess(const ParsingType &package)
+{
   auto scorer_blob_buffer = scorer_core_->GetBuffer(false);
   // 获取对应的score_renderer
   // 设置推理前后blob输出的位置,这里输入输出都在device端
@@ -408,28 +428,26 @@ bool FoundationPose::ScorePreprocess(std::shared_ptr<async_pipeline::IPipelinePackage> _package)
   auto &score_renderer = map_name2renderer_[package->target_name];
   CHECK_STATE(
       score_renderer->RenderAndTransform(
-          refine_poses, package->rgb_on_device.get(), package->depth_on_device.get(),
+          package->hyp_poses, package->rgb_on_device.get(), package->depth_on_device.get(),
          package->xyz_map_on_device.get(), package->input_image_height, package->input_image_width,
           scorer_blob_buffer->GetOuterBlobBuffer(RENDER_INPUT_BLOB_NAME).first,
-          scorer_blob_buffer->GetOuterBlobBuffer(TRANSF_INPUT_BLOB_NAME).first),
+          scorer_blob_buffer->GetOuterBlobBuffer(TRANSF_INPUT_BLOB_NAME).first,
+          score_mode_crop_ratio_),
       "[FoundationPose] score_renderer RenderAndTransform Failed!!!");
 
-  package->refine_poses = std::move(refine_poses);
   package->scorer_blobs_buffer = scorer_blob_buffer;
   package->infer_buffer = scorer_blob_buffer;
   return true;
 }
 
-bool FoundationPose::ScorePostProcess(std::shared_ptr<async_pipeline::IPipelinePackage> _package)
+bool FoundationPose::ScorePostProcess(const ParsingType &package)
 {
-  auto package = std::dynamic_pointer_cast<FoundationPosePipelinePackage>(_package);
-  CHECK_STATE(package != nullptr, "[FoundationPose] ScorePostProcess Got INVALID package ptr!!!");
   const auto &scorer_blob_buffer = package->scorer_blobs_buffer;
   // 获取scorer模型的输出缓存指针
   void *score_ptr = scorer_blob_buffer->GetOuterBlobBuffer(SCORE_OUTPUT_BLOB_NAME).first;
 
-  const auto &refine_poses = package->refine_poses;
+  const auto &refine_poses = package->hyp_poses;
   const int poses_num = refine_poses.size();
 
   // 获取置信度最大的refined_pose
@@ -439,58 +457,6 @@ bool FoundationPose::ScorePostProcess(std::shared_ptr<async_pipeline::IPipelinePackage> _package)
   return true;
 }
 
-bool FoundationPose::TrackPostProcess(std::shared_ptr<async_pipeline::IPipelinePackage> _package)
-{
-  auto package = std::dynamic_pointer_cast<FoundationPosePipelinePackage>(_package);
-  CHECK_STATE(package != nullptr, "[FoundationPose] TrackPostProcess Got INVALID package ptr!!!");
-
-  // 获取refiner模型的缓存指针
-  const auto &refiner_blob_buffer = package->refiner_blobs_buffer;
-  const auto _trans_ptr = refiner_blob_buffer->GetOuterBlobBuffer(REFINE_TRANS_OUT_BLOB_NAME).first;
-  const auto _rot_ptr = refiner_blob_buffer->GetOuterBlobBuffer(REFINE_ROT_OUT_BLOB_NAME).first;
-  const float *trans_ptr = static_cast<const float *>(_trans_ptr);
-  const float *rot_ptr = static_cast<const float *>(_rot_ptr);
-
-  // 获取生成的假设位姿
-  const auto &hyp_poses = package->hyp_poses;
-  const int poses_num = hyp_poses.size();
-
-  // 获取对应的mesh_loader
-  const auto &mesh_loader = map_name2loaders_[package->target_name];
-
-  // transformation 将模型输出的相对位姿转换为绝对位姿
-  const float mesh_diameter = mesh_loader->GetMeshDiameter();
-  std::vector<Eigen::Vector3f> trans_delta(poses_num);
-  std::vector<Eigen::Vector3f> rot_delta(poses_num);
-  std::vector<Eigen::Matrix3f> rot_mat_delta(poses_num);
-  for (int i = 0; i < poses_num; ++i)
-  {
-    const size_t offset = i * 3;
-    trans_delta[i] << trans_ptr[offset], trans_ptr[offset + 1], trans_ptr[offset + 2];
-    trans_delta[i] *= mesh_diameter / 2;
-
-    rot_delta[i] << rot_ptr[offset], rot_ptr[offset + 1], rot_ptr[offset + 2];
-    auto normalized_vect = (rot_delta[i].array().tanh() * REFINE_ROT_NORMALIZER).matrix();
-    Eigen::AngleAxis<float> rot_delta_angle_axis(normalized_vect.norm(), normalized_vect.normalized());
-    rot_mat_delta[i] = rot_delta_angle_axis.toRotationMatrix().transpose();
-  }
-
-  std::vector<Eigen::Matrix4f> refine_poses(poses_num);
-  for (int i = 0; i < poses_num; ++i)
-  {
-    refine_poses[i] = hyp_poses[i];
-    refine_poses[i].col(3).head(3) += trans_delta[i];
-
-    Eigen::Matrix3f top_left_3x3 = refine_poses[i].block<3, 3>(0, 0);
-    Eigen::Matrix3f result_3x3 = rot_mat_delta[i] * top_left_3x3;
-    refine_poses[i].block<3, 3>(0, 0) = result_3x3;
-  }
-
-  package->actual_pose = refine_poses[0];
-
-  return true;
-}
-
 std::shared_ptr CreateFoundationPoseModel(
     std::shared_ptr refiner_core,
     std::shared_ptr scorer_core,
diff --git a/detection_6d_foundationpose/src/foundationpose_render.cpp b/detection_6d_foundationpose/src/foundationpose_render.cpp
index 5742ca8..9c9e7eb 100644
--- a/detection_6d_foundationpose/src/foundationpose_render.cpp
+++ b/detection_6d_foundationpose/src/foundationpose_render.cpp
@@ -220,7 +220,6 @@ void WrapFloatPtrToNHWCTensor(
 FoundationPoseRenderer::FoundationPoseRenderer(std::shared_ptr mesh_loader,
                                                const Eigen::Matrix3f &intrinsic,
                                                const int input_poses_num,
-                                               const float crop_ratio,
                                                const int crop_window_H,
                                                const int crop_window_W,
                                                const float min_depth,
@@ -228,7 +227,6 @@ FoundationPoseRenderer::FoundationPoseRenderer(std::shared_ptr m
     : mesh_loader_(mesh_loader),
      intrinsic_(intrinsic),
      input_poses_num_(input_poses_num),
-      crop_ratio_(crop_ratio),
      crop_window_H_(crop_window_H),
      crop_window_W_(crop_window_W),
      min_depth_(min_depth),
@@ -820,14 +818,15 @@ bool FoundationPoseRenderer::RenderAndTransform(const std::vector
                                                 void *render_buffer,
-                                                void *transf_buffer)
+                                                void *transf_buffer,
+                                                float crop_ratio)
 {
   std::vector<Eigen::Matrix4f> poses(_poses.begin(), _poses.end());
   Eigen::Vector2i out_size = {crop_window_H_, crop_window_W_};
-  auto tfs = ComputeCropWindowTF(poses, intrinsic_, out_size, crop_ratio_, mesh_diameter_);
+  auto tfs = ComputeCropWindowTF(poses, intrinsic_, out_size, crop_ratio, mesh_diameter_);
   CHECK_STATE(tfs.size() != 0, "[FoundationposeRender] The transform matrix vector is empty");
 
   // 2. 将输入的poses拷贝到device端
diff --git a/detection_6d_foundationpose/src/foundationpose_render.hpp b/detection_6d_foundationpose/src/foundationpose_render.hpp
index 4c0ebfa..1b32b52 100644
--- a/detection_6d_foundationpose/src/foundationpose_render.hpp
+++ b/detection_6d_foundationpose/src/foundationpose_render.hpp
@@ -21,10 +21,9 @@ class FoundationPoseRenderer {
   FoundationPoseRenderer(std::shared_ptr mesh_loader,
                          const Eigen::Matrix3f &intrinsic,
                          const int input_poses_num,
-                         const float crop_ratio = 1.2,
                          const int crop_window_H = 160,
                          const int crop_window_W = 160,
-                         const float min_depth = 0.1,
+                         const float min_depth = 0.001,
                          const float max_depth = 4.0);
 
   bool RenderAndTransform(const std::vector<Eigen::Matrix4f> &_poses,
@@ -34,7 +33,8 @@ class FoundationPoseRenderer {
                           int input_image_height, int input_image_width,
                           void *render_buffer,
-                          void *transf_buffer);
+                          void *transf_buffer,
+                          float crop_ratio);
 
   ~FoundationPoseRenderer();
 
@@ -94,7 +94,6 @@ class FoundationPoseRenderer {
   // crop window size (model input size)
   const int crop_window_H_;
   const int crop_window_W_;
-  const float crop_ratio_;  // refine, score->1.1
   const Eigen::Matrix3f intrinsic_;
   // depth threshold
diff --git a/detection_6d_foundationpose/src/foundationpose_sampling.cpp b/detection_6d_foundationpose/src/foundationpose_sampling.cpp
index 78831ee..f616adf 100644
--- a/detection_6d_foundationpose/src/foundationpose_sampling.cpp
+++ b/detection_6d_foundationpose/src/foundationpose_sampling.cpp
@@ -300,7 +300,6 @@ bool GuessTranslation(const Eigen::MatrixXf &depth,
 FoundationPoseSampler::FoundationPoseSampler(const int max_input_image_H,
                                              const int max_input_image_W,
                                              const float min_depth,
-                                             const float max_depth,
                                              const Eigen::Matrix3f &intrinsic)
     : max_input_image_H_(max_input_image_H),
      max_input_image_W_(max_input_image_W),
diff --git a/detection_6d_foundationpose/src/foundationpose_sampling.hpp b/detection_6d_foundationpose/src/foundationpose_sampling.hpp
index 518ccea..545d1d5 100644
--- a/detection_6d_foundationpose/src/foundationpose_sampling.hpp
+++ b/detection_6d_foundationpose/src/foundationpose_sampling.hpp
@@ -13,7 +13,6 @@ class FoundationPoseSampler {
   FoundationPoseSampler(const int max_input_image_H,
                         const int max_input_image_W,
                         const float min_depth,
-                        const float max_depth,
                         const Eigen::Matrix3f &intrinsic);
 
   bool GetHypPoses(void *_depth_on_device,
diff --git a/detection_6d_foundationpose/src/foundationpose_utils.hpp b/detection_6d_foundationpose/src/foundationpose_utils.hpp
index 7b1b1b7..31fe75a 100644
--- a/detection_6d_foundationpose/src/foundationpose_utils.hpp
+++ b/detection_6d_foundationpose/src/foundationpose_utils.hpp
@@ -54,12 +54,8 @@ struct FoundationPosePipelinePackage : public async_pipeline::IPipelinePackage {
   std::shared_ptr depth_on_device;
   // device端由depth转换得到的xyz_map
   std::shared_ptr xyz_map_on_device;
-  // device端的输入mask缓存
-  // std::shared_ptr mask_on_device;
   // 生成的假设位姿
   std::vector<Eigen::Matrix4f> hyp_poses;
-  // refine后的位姿
-  std::vector<Eigen::Matrix4f> refine_poses;
   // 保存refine阶段用的推理缓存
   std::shared_ptr refiner_blobs_buffer;
diff --git a/simple_tests/src/test_foundationpose.cpp b/simple_tests/src/test_foundationpose.cpp
index 469f8c2..5322eff 100644
--- a/simple_tests/src/test_foundationpose.cpp
+++ b/simple_tests/src/test_foundationpose.cpp
@@ -17,6 +17,7 @@ static const std::string demo_textured_obj_path = demo_data_path_ + "/mesh/textu
 static const std::string demo_textured_map_path = demo_data_path_ + "/mesh/texture_map.png";
 static const std::string demo_name_ = "mustard";
 static const std::string frame_id = "1581120424100262102";
+static const size_t refine_itr = 1;
 
 std::tuple, std::shared_ptr> CreateModel()
 {
@@ -58,7 +59,7 @@ TEST(foundationpose_test, test)
   const Eigen::Vector3f object_dimension = mesh_loader->GetObjectDimension();
 
   Eigen::Matrix4f out_pose;
-  CHECK(foundation_pose->Register(rgb.clone(), depth, mask, demo_name_, out_pose));
+  CHECK(foundation_pose->Register(rgb.clone(), depth, mask, demo_name_, out_pose, refine_itr));
   LOG(WARNING) << "first Pose : " << out_pose;
 
   // [temp] for test
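
For reference, a minimal caller-side sketch of the interface after this change (not part of the patch). The Register/Track signatures follow the header diff above; the function name, variable names, include path, and the chosen iteration counts are assumptions for illustration only.

// Hypothetical usage sketch of the new refine_itr parameter; the model is assumed
// to come from CreateFoundationPoseModel and the images from the caller.
#include <Eigen/Dense>
#include <opencv2/opencv.hpp>
#include "detection_6d_foundationpose/foundationpose.hpp"  // assumed include path

void RunPoseDemo(const std::shared_ptr<Base6DofDetectionModel> &model,
                 const cv::Mat &rgb, const cv::Mat &depth, const cv::Mat &mask,
                 const cv::Mat &next_rgb, const cv::Mat &next_depth)
{
  Eigen::Matrix4f pose_in_mesh;
  // Initial registration: run the refine pre-process / infer / post-process loop
  // several times before the scoring stage picks the best hypothesis.
  if (!model->Register(rgb, depth, mask, "mustard", pose_in_mesh, /*refine_itr=*/5)) {
    return;
  }
  // Subsequent frames: track from the previous estimate with a single refinement pass.
  Eigen::Matrix4f tracked_pose;
  if (model->Track(next_rgb, next_depth, pose_in_mesh, "mustard", tracked_pose, /*refine_itr=*/1)) {
    pose_in_mesh = tracked_pose;
  }
}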