
Commit 959a3ff

Separated test files per category (utils or SAM related for now)
1 parent dffbcd3 commit 959a3ff


3 files changed (+214 -47 lines)

CMakeLists.txt

Lines changed: 14 additions & 4 deletions
@@ -97,13 +97,23 @@ if (CATKIN_ENABLE_TESTING)
   # find_package(catkin_lint_cmake REQUIRED)
   # catkin_add_catkin_lint_test("-W2 --ignore HEADER_OUTSIDE_PACKAGE_INCLUDE_PATH")

-  catkin_add_gtest(sam_onnx_ros_tests test/sam_test.cpp)
-  if(TARGET sam_onnx_ros_tests)
-    target_link_libraries(sam_onnx_ros_tests sam_onnx_ros_core ${catkin_LIBRARIES})
-    target_include_directories(sam_onnx_ros_tests PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include)
+  # Utils unit tests (no models needed)
+  catkin_add_gtest(utils_tests test/test_utils.cpp)
+  if(TARGET utils_tests)
+    target_link_libraries(utils_tests sam_onnx_ros_core GTest::gtest_main ${catkin_LIBRARIES})
+    target_include_directories(utils_tests PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include)
+  endif()
+
+  # SAM integration-ish tests (may need models)
+  catkin_add_gtest(sam_tests test/sam_test.cpp)
+  if(TARGET sam_tests)
+    target_link_libraries(sam_tests sam_onnx_ros_core GTest::gtest_main ${catkin_LIBRARIES})
+    target_include_directories(sam_tests PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include)
   endif()
 endif()

+
+
 #If you want to debug
 # set(CMAKE_BUILD_TYPE Debug)
 # set(CMAKE_CXX_FLAGS_DEBUG "-g")
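Both new test targets link GTest::gtest_main, which supplies the GoogleTest entry point; that is why the hand-written main() disappears from test/sam_test.cpp in the next diff. For reference, a rough equivalent of what gtest_main provides:

#include <gtest/gtest.h>

// Rough equivalent of the main() shipped with GTest::gtest_main:
// it initializes GoogleTest and runs every registered test.
int main(int argc, char **argv)
{
    ::testing::InitGoogleTest(&argc, argv);
    return RUN_ALL_TESTS();
}

Since each category is now its own binary, individual suites can also be narrowed at runtime with the standard --gtest_filter flag.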

test/sam_test.cpp

Lines changed: 25 additions & 43 deletions
@@ -1,77 +1,63 @@
-#include "segmentation.h"
-#include "sam_inference.h"
 #include <gtest/gtest.h>
 #include <opencv2/opencv.hpp>
-#include "dl_types.h"
-#include "utils.h"
 #include <filesystem>
+#include "segmentation.h"
+#include "sam_inference.h"
+#include "dl_types.h"
+
+// This file contains higher-level (integration-ish) tests.
+// They cover object/session creation and a full pipeline run using synthetic images.
+// These tests may require the .onnx model files to be present next to the binary or in a known dir.

 class SamInferenceTest : public ::testing::Test
 {
 protected:
     void SetUp() override
     {
-        // Create test images with different characteristics
+        // Create simple synthetic images:
+        // - a white 640x640 (square)
+        // - a gray 800x600 (non-square)
         testImage_640x640 = cv::Mat::ones(640, 640, CV_8UC3) * 255;
         testImage_800x600 = cv::Mat::ones(600, 800, CV_8UC3) * 128;

-        // Create a more realistic test image with some patterns
+        // A "random noise" image to simulate realistic content for end-to-end checks.
         testImage_realistic = cv::Mat(640, 640, CV_8UC3);
         cv::randu(testImage_realistic, cv::Scalar(0,0,0), cv::Scalar(255,255,255));

-        // Setup common parameters
+        // Cache non-square size for preprocessing helpers.
         NonSquareImgSize = { testImage_800x600.cols, testImage_800x600.rows };

-        // Use the package Initializer/SegmentAnything for the full pipeline
-
+        // Use package helpers to build default params and SAM objects.
         std::tie(samSegmentors, params_encoder, params_decoder) = Initializer();

 #ifdef USE_CUDA
-        params_encoder.cudaEnable = true;
+        params_encoder.cudaEnable = true;   // Enable CUDA if compiled with it
 #else
-        params_encoder.cudaEnable = false;
+        params_encoder.cudaEnable = false;  // Otherwise run on CPU
 #endif
     }

+    // Clean up the SAM objects after each test.
     void TearDown() override { samSegmentors[0].reset(); samSegmentors[1].reset(); }

-    // Test data
+    // Test data and objects shared across tests.
     Utils utilities;
     cv::Mat testImage_640x640, testImage_800x600, testImage_realistic;
     std::vector<int> NonSquareImgSize;
     std::vector<std::unique_ptr<SAM>> samSegmentors;
     SEG::DL_INIT_PARAM params_encoder, params_decoder;
 };

-
-
+// Simple smoke test: we can construct a SAM object without throwing.
 TEST_F(SamInferenceTest, ObjectCreation)
 {
     EXPECT_NO_THROW({
         SAM localSam;
     });
 }

-TEST_F(SamInferenceTest, PreProcessSquareImage)
-{
-    cv::Mat processedImg;
-    const char* result = utilities.PreProcess(testImage_640x640, params_encoder.imgSize, processedImg);
-
-    EXPECT_EQ(result, nullptr) << "PreProcess should succeed";
-    EXPECT_EQ(processedImg.size(), cv::Size(1024, 1024)) << "Output should be letterboxed to 1024x1024";
-    EXPECT_FALSE(processedImg.empty()) << "Processed image should not be empty";
-}
-
-TEST_F(SamInferenceTest, PreProcessRectangularImage)
-{
-    cv::Mat processedImg;
-    const char* result = utilities.PreProcess(testImage_800x600, NonSquareImgSize, processedImg);
-
-    EXPECT_EQ(result, nullptr) << "PreProcess should succeed";
-    EXPECT_EQ(processedImg.size(), cv::Size(800, 600)) << "Output should be letterboxed to 800x600";
-    EXPECT_FALSE(processedImg.empty()) << "Processed image should not be empty";
-}
-
+// Confirms that with a present encoder model we can initialize a session.
+// Skips if the model file is not available.
 TEST_F(SamInferenceTest, CreateSessionWithValidModel)
 {
     if (!std::filesystem::exists("SAM_encoder.onnx")) {
@@ -81,29 +67,25 @@ TEST_F(SamInferenceTest, CreateSessionWithValidModel)
     EXPECT_NE(samSegmentors[0], nullptr) << "CreateSession should succeed with valid parameters";
 }

+// Confirms that giving an invalid model path returns an error (no crash).
 TEST_F(SamInferenceTest, CreateSessionWithInvalidModel)
 {
     params_encoder.modelPath = "nonexistent_model.onnx";
     const char* result = samSegmentors[0]->CreateSession(params_encoder);
     EXPECT_NE(result, nullptr) << "CreateSession should fail with invalid model path";
 }

+// End-to-end check: with both encoder/decoder models present, the pipeline runs
+// and returns a mask vector. Skips if models are not available.
 TEST_F(SamInferenceTest, FullInferencePipeline)
 {
     if (!std::filesystem::exists("SAM_encoder.onnx") ||
         !std::filesystem::exists("SAM_mask_decoder.onnx")) {
         GTEST_SKIP() << "Models not found in build dir";
     }

-
-
     auto masks = SegmentAnything(samSegmentors, params_encoder, params_decoder, testImage_realistic);
-    EXPECT_TRUE(masks.size() >= 0) << "Masks should be a valid output vector";
-}

-// Run all tests
-int main(int argc, char **argv)
-{
-    testing::InitGoogleTest(&argc, argv);
-    return RUN_ALL_TESTS();
+    // We only check that a vector is returned. (You can strengthen this to EXPECT_FALSE(masks.empty()).)
+    EXPECT_TRUE(masks.size() >= 0) << "Masks should be a valid output vector";
 }
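As the in-file comment notes, masks.size() >= 0 is always true for an unsigned size, so the end-to-end assertion could be tightened. A possible sketch, assuming SegmentAnything returns a std::vector<cv::Mat> (hypothetical test name; adjust to the actual return type):

// Hypothetical tightened end-to-end check; assumes masks is a std::vector<cv::Mat>.
TEST_F(SamInferenceTest, FullInferencePipelineProducesMasks)
{
    if (!std::filesystem::exists("SAM_encoder.onnx") ||
        !std::filesystem::exists("SAM_mask_decoder.onnx")) {
        GTEST_SKIP() << "Models not found in build dir";
    }

    auto masks = SegmentAnything(samSegmentors, params_encoder, params_decoder, testImage_realistic);
    ASSERT_FALSE(masks.empty()) << "Pipeline should produce at least one mask";
    for (const auto& m : masks) {
        EXPECT_FALSE(m.empty()) << "Each returned mask should be non-empty";
    }
}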

test/test_utils.cpp

Lines changed: 175 additions & 0 deletions
@@ -0,0 +1,175 @@
#include <gtest/gtest.h>
#include <opencv2/opencv.hpp>
#include "utils.h"

// This file contains small, focused unit tests for Utils.
// We verify image preprocessing (channel conversion, aspect-preserving resize, padding)
// and coordinate scaling to match preprocessing.

// Lightweight fixture: gives each test a fresh Utils instance.
class UtilsTest : public ::testing::Test {
protected:
    Utils u;
};

// Checks that a grayscale (1-channel) image is converted to RGB (3-channel)
// and the output image is exactly the requested target size (letterboxed).
TEST_F(UtilsTest, GrayscaleToRGBKeepsSize) {
    cv::Mat gray = cv::Mat::zeros(300, 500, CV_8UC1);
    cv::Mat out;
    std::vector<int> target{1024, 1024};

    // Call PreProcess and expect no error.
    const char* err = u.PreProcess(gray, target, out);
    ASSERT_EQ(err, nullptr);

    // After preprocessing, we must have 3 channels (RGB).
    EXPECT_EQ(out.channels(), 3);

    // The letterboxed output must match the target canvas size.
    EXPECT_EQ(out.size(), cv::Size(target[0], target[1]));
}

// Verifies three things:
// 1) Aspect ratio is preserved when resizing to the target.
// 2) The resized image is placed at the top-left (0,0).
// 3) The padding area is zero (black).
TEST_F(UtilsTest, PreprocessTopLeftPaddingAndAspect) {
    const cv::Scalar fill(10, 20, 30);  // Input color in BGR
    cv::Mat img(720, 1280, CV_8UC3, fill);
    cv::Mat out;
    std::vector<int> target{1024, 1024};

    ASSERT_EQ(u.PreProcess(img, target, out), nullptr);
    ASSERT_EQ(out.size(), cv::Size(target[0], target[1]));
    ASSERT_EQ(out.channels(), 3);

    // Width drives resizing here (landscape). Width becomes 1024, height scales accordingly.
    int resized_w = target[0];
    int resized_h = static_cast<int>(img.rows / (img.cols / static_cast<float>(target[0])));

    // PreProcess converts BGR -> RGB, so expected color is swapped.
    cv::Scalar expected_rgb(fill[2], fill[1], fill[0]);

    // The top-left region (resized content) should keep the image color.
    cv::Mat roi_top = out(cv::Rect(0, 0, resized_w, resized_h));
    cv::Scalar mean_top = cv::mean(roi_top);
    EXPECT_NEAR(mean_top[0], expected_rgb[0], 1.0);
    EXPECT_NEAR(mean_top[1], expected_rgb[1], 1.0);
    EXPECT_NEAR(mean_top[2], expected_rgb[2], 1.0);

    // The area below the resized content (padding) must be zeros.
    if (resized_h < target[1]) {
        cv::Mat roi_pad = out(cv::Rect(0, resized_h, target[0], target[1] - resized_h));
        cv::Mat gray; cv::cvtColor(roi_pad, gray, cv::COLOR_BGR2GRAY);
        EXPECT_EQ(cv::countNonZero(gray), 0);
    }
}

// Parameterized fixture: used with TEST_P to run the same test body
// for many (input size, target size) pairs.
class UtilsPreprocessParamTest
    : public ::testing::TestWithParam<std::tuple<cv::Size, cv::Size>> {
protected:
    Utils u;
};

// TEST_P defines a parameterized test. It runs once per parameter set.
// We assert that:
// - Output size equals the target canvas.
// - Output has 3 channels (RGB).
// - The padding area (bottom or right) is zero depending on which side letterboxes.
TEST_P(UtilsPreprocessParamTest, LetterboxWithinBoundsAndChannels3) {
    const auto [inSize, target] = GetParam();
    cv::Mat img(inSize, CV_8UC3, cv::Scalar(1, 2, 3));
    cv::Mat out;

    ASSERT_EQ(u.PreProcess(img, {target.width, target.height}, out), nullptr);
    EXPECT_EQ(out.size(), target);
    EXPECT_EQ(out.channels(), 3);

    // Detect which side letterboxes and check that the padded region is zeros.
    if (inSize.width >= inSize.height) {
        int resized_h = static_cast<int>(inSize.height / (inSize.width / static_cast<float>(target.width)));
        if (resized_h < target.height) {
            cv::Mat roi_pad = out(cv::Rect(0, resized_h, target.width, target.height - resized_h));
            cv::Mat gray; cv::cvtColor(roi_pad, gray, cv::COLOR_BGR2GRAY);
            EXPECT_EQ(cv::countNonZero(gray), 0);
        }
    } else {
        int resized_w = static_cast<int>(inSize.width / (inSize.height / static_cast<float>(target.height)));
        if (resized_w < target.width) {
            cv::Mat roi_pad = out(cv::Rect(resized_w, 0, target.width - resized_w, target.height));
            cv::Mat gray; cv::cvtColor(roi_pad, gray, cv::COLOR_BGR2GRAY);
            EXPECT_EQ(cv::countNonZero(gray), 0);
        }
    }
}

// INSTANTIATE_TEST_SUITE_P provides the concrete parameter values.
// Each pair (input size, target size) creates a separate test instance.
INSTANTIATE_TEST_SUITE_P(
    ManySizes,
    UtilsPreprocessParamTest,
    ::testing::Values(
        std::make_tuple(cv::Size(640, 640), cv::Size(1024, 1024)),   // square -> square
        std::make_tuple(cv::Size(800, 600), cv::Size(800, 600)),     // same size (no resize)
        std::make_tuple(cv::Size(600, 800), cv::Size(800, 600)),     // portrait -> landscape
        std::make_tuple(cv::Size(1280, 720), cv::Size(1024, 1024))   // wide -> square
    )
);

// Separate fixture for point scaling tests.
class UtilsScaleBboxPointsTest : public ::testing::Test {
protected:
    Utils u;
};

// If the input size and target size are the same, scaling should do nothing.
TEST_F(UtilsScaleBboxPointsTest, IdentityWhenSameSize) {
    cv::Mat img(600, 800, CV_8UC3);
    std::vector<int> target{800, 600};
    std::vector<float> pts{100.f, 100.f, 700.f, 500.f};
    std::vector<float> scaled;

    u.ScaleBboxPoints(img, target, pts, scaled);
    ASSERT_EQ(scaled.size(), pts.size());
    EXPECT_NEAR(scaled[0], pts[0], 1e-3);
    EXPECT_NEAR(scaled[1], pts[1], 1e-3);
    EXPECT_NEAR(scaled[2], pts[2], 1e-3);
    EXPECT_NEAR(scaled[3], pts[3], 1e-3);
}

// When width drives the resize (landscape), both x and y are scaled by the same factor.
// We expect coordinates to be multiplied by target_width / input_width.
TEST_F(UtilsScaleBboxPointsTest, ScalesWidthDominant) {
    cv::Mat img(300, 600, CV_8UC3);     // h=300, w=600 (w >= h)
    std::vector<int> target{1200, 600}; // width doubles
    std::vector<float> pts{100.f, 50.f, 500.f, 250.f};
    std::vector<float> scaled;

    u.ScaleBboxPoints(img, target, pts, scaled);
    ASSERT_EQ(scaled.size(), pts.size());
    const float scale = target[0] / static_cast<float>(img.cols);  // 1200/600 = 2
    EXPECT_NEAR(scaled[0], pts[0] * scale, 1e-3);
    EXPECT_NEAR(scaled[1], pts[1] * scale, 1e-3);
    EXPECT_NEAR(scaled[2], pts[2] * scale, 1e-3);
    EXPECT_NEAR(scaled[3], pts[3] * scale, 1e-3);
}

// When height drives the resize (portrait), both x and y are scaled by the same factor.
// We expect coordinates to be multiplied by target_height / input_height.
TEST_F(UtilsScaleBboxPointsTest, ScalesHeightDominant) {
    cv::Mat img(600, 300, CV_8UC3);     // h=600, w=300 (h > w)
    std::vector<int> target{600, 1200}; // height doubles
    std::vector<float> pts{100.f, 50.f, 200.f, 500.f};
    std::vector<float> scaled;

    u.ScaleBboxPoints(img, target, pts, scaled);
    ASSERT_EQ(scaled.size(), pts.size());
    const float scale = target[1] / static_cast<float>(img.rows);  // 1200/600 = 2
    EXPECT_NEAR(scaled[0], pts[0] * scale, 1e-3);
    EXPECT_NEAR(scaled[1], pts[1] * scale, 1e-3);
    EXPECT_NEAR(scaled[2], pts[2] * scale, 1e-3);
    EXPECT_NEAR(scaled[3], pts[3] * scale, 1e-3);
}
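For reference, the preprocessing contract these tests encode (aspect-preserving resize driven by the dominant side, content anchored at the top-left, zero padding, BGR to RGB conversion) looks roughly like the sketch below. This is a reading of the assumed behaviour, not the package's actual Utils::PreProcess implementation; the helper name LetterboxSketch is hypothetical.

#include <opencv2/opencv.hpp>

// Sketch of the letterbox behaviour assumed by the tests above (hypothetical helper):
// scale by the dominant side, paste at (0,0), pad the remainder with zeros, convert BGR->RGB.
static cv::Mat LetterboxSketch(const cv::Mat& in, int targetW, int targetH)
{
    cv::Mat rgb;
    if (in.channels() == 1)
        cv::cvtColor(in, rgb, cv::COLOR_GRAY2RGB);
    else
        cv::cvtColor(in, rgb, cv::COLOR_BGR2RGB);

    // The relatively larger side drives the scale factor (matches the tests' math).
    const float scale = (in.cols >= in.rows)
        ? targetW / static_cast<float>(in.cols)
        : targetH / static_cast<float>(in.rows);

    cv::Mat resized;
    cv::resize(rgb, resized, cv::Size(static_cast<int>(in.cols * scale),
                                      static_cast<int>(in.rows * scale)));

    // Zero-filled canvas; the resized content is pasted at the top-left corner.
    cv::Mat canvas = cv::Mat::zeros(targetH, targetW, CV_8UC3);
    resized.copyTo(canvas(cv::Rect(0, 0, resized.cols, resized.rows)));
    return canvas;
}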
