diff --git a/.gitignore b/.gitignore
index 74a0a7a..e429bd6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -51,5 +51,60 @@ qtcreator-*
 # Catkin custom files
 CATKIN_IGNORE
+.vscode/*
+image-filtering/.vscode/*
+devel/
+logs/
+build/
+bin/
+lib/
+msg_gen/
+srv_gen/
+msg/*Action.msg
+msg/*ActionFeedback.msg
+msg/*ActionGoal.msg
+msg/*ActionResult.msg
+msg/*Feedback.msg
+msg/*Goal.msg
+msg/*Result.msg
+msg/_*.py
+build_isolated/
+devel_isolated/
+*/launch/__pycache__
+
+# Generated by dynamic reconfigure
+*.cfgc
+/cfg/cpp/
+/cfg/*.py
+
+# Ignore generated docs
+*.dox
+*.wikidoc
+
+# eclipse stuff
+.project
+.cproject
+
+# qcreator stuff
+CMakeLists.txt.user
+
+srv/_*.py
+*.pcd
+*.pyc
+qtcreator-*
+*.user
+
+/planning/cfg
+/planning/docs
+/planning/src
+
+*~
+
+# Emacs
+.#*
+
+# Catkin custom files
+CATKIN_IGNORE
+
 .vscode/
 log/
+image-filtering/.vscode/*
diff --git a/README.md b/README.md
index 2d79e52..53560ea 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,6 @@
 # Image Filtering Node
 [![Industrial CI](https://github.com/vortexntnu/vortex-image-filtering/actions/workflows/industrial-ci.yml/badge.svg)](https://github.com/vortexntnu/vortex-image-filtering/actions/workflows/industrial-ci.yml)
+[![pre-commit.ci status](https://results.pre-commit.ci/badge/github/vortexntnu/vortex-image-filtering/main.svg)](https://results.pre-commit.ci/latest/github/vortexntnu/vortex-image-filtering/main)
 [![codecov](https://codecov.io/github/vortexntnu/vortex-image-filtering/graph/badge.svg?token=6XHprkpUsR)](https://codecov.io/github/vortexntnu/vortex-image-filtering)

 The `image_filtering_node` is a ROS 2 node developed in the `vortex::image_filters` namespace. It is designed to subscribe to image topics, apply various image filters using OpenCV, and publish the filtered images back to ROS.
@@ -31,96 +32,140 @@ Parameters can be set through a YAML file or dynamically adjusted at runtime.

 ## Implementing New Filters

-To extend the functionality of the `image_filtering_node` by adding new filters, follow these steps to ensure compatibility and integration with the existing codebase:
+To extend the functionality of the `image_filtering_node` by adding new filters, follow these steps to ensure compatibility and integration with the existing codebase. `// TODO(New filter)` comments mark each place where your filter needs to be added:

-### Step 1: Define Filter Parameters
+### Step 1: Filter Enum

-Each filter should have its own set of parameters encapsulated in a structure. Define this structure within the `vortex::image_filters` namespace.
+Add your filter type to the `FilterType` enum in [image_processing.hpp](image-filtering/include/image_filters/image_processing.hpp):

```cpp
-struct YourFilterParams {
-    // Add necessary parameters here
-    int example_param;
+enum class FilterType {
+    NoFilter,
+    Flip,
+    Unsharpening,
+    Erosion,
+    Dilation,
+    ...
+    // Add your filter here
 };
```

-### Step 2: Add to FilterParams Structure
-
-Integrate your new filter parameters structure into the existing `FilterParams` structure. This allows the `apply_filter` function to access the parameters specific to your filter.
+### Step 2: Filter String
+The filter is selected through a string in the YAML file.
Add that string, mapped to your new enum value, to `kFilterMap` in [image_processing.hpp](image-filtering/include/image_filters/image_processing.hpp):

```cpp
-struct FilterParams {
-    std::string filter_type;
-    UnsharpeningFilterParams unsharpening;
-    ErodingFilterParams eroding;
-    DilatingFilterParams dilating;
-    WhiteBalancingFilterParams white_balancing;
-    EbusFilterParams ebus;
-    YourFilterParams your_filter;  // Add your filter params here
+static constexpr std::pair<std::string_view, FilterType> kFilterMap[] = {
+    {"no_filter", FilterType::NoFilter},
+    {"flip", FilterType::Flip},
+    {"unsharpening", FilterType::Unsharpening},
+    ...
+
+    // Add your filter here
+    {"example", FilterType::Example},
+    {"unknown", FilterType::Unknown}
 };
```

-### Step 3: Create the Filter Function
-Implement your filter function. This function should take the `cv::Mat` objects for the input and output images and a `const FilterParams&` which includes your specific filter parameters. Make sure to use your parameter structure within this function.
+### Step 3: Define Filter Parameters

-```cpp
-void your_filter_function(const cv::Mat &original, cv::Mat &filtered, const FilterParams& filter_params) {
-    // Access your filter-specific parameters like this:
-    int example_param = filter_params.your_filter.example_param;
+Each filter should have its own set of parameters encapsulated in a struct. Define this struct in [image_processing.hpp](image-filtering/include/image_filters/image_processing.hpp).

-    // Implement your filtering logic here
-}
+```cpp
+struct ExampleParams {
+    // Add necessary filter parameters here
+    int example_int;
+    std::string example_string;
+};
 ```

-### Step 4: Register the Filter Function
-
-Add an entry to the `filter_functions` map for your new filter. This step is crucial as it links the filter name (as a string) to the corresponding filter function pointer.
+### Step 4: Add a Filter Class
+Below the parameter struct, add a class for your filter that inherits from the `Filter` base class, following the structure shown below. This also goes in [image_processing.hpp](image-filtering/include/image_filters/image_processing.hpp):

```cpp
-std::map<std::string, FilterFunction> filter_functions = {
-    {"no_filter", no_filter},
-    {"sharpening", sharpening_filter},
-    {"unsharpening", unsharpening_filter},
-    {"eroding", eroding_filter},
-    {"dilating", dilating_filter},
-    {"white_balancing", white_balance_filter},
-    {"ebus", ebus_filter},
-    {"your_filter", your_filter_function}  // Add your filter here
+class Example : public Filter {
+   public:
+    explicit Example(ExampleParams params) : filter_params(params) {}
+    void apply_filter(const cv::Mat& original, cv::Mat& filtered) const override;  // This is the filter itself
+   private:
+    ExampleParams filter_params;
 };
```
+The class is also the place for other filter-specific things, such as variables that need to persist between runs; see the sketch after this code block.
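For reference, here is a minimal sketch of what such a stateful filter could look like, modeled on the `Overlap` filter in this package. The `FrameDiff` name and its parameter are hypothetical and purely illustrative; the sketch assumes the `Filter` base class from `image_processing.hpp` and the OpenCV headers are in scope.

```cpp
// Hypothetical stateful filter: remembers the previous frame between calls.
// apply_filter() is const, so cached state lives in mutable members,
// mirroring how the Overlap filter in this package caches its previous frame.
struct FrameDiffParams {
    double weight;  // blend weight for the previous frame (illustrative only)
};

class FrameDiff : public Filter {
   public:
    explicit FrameDiff(FrameDiffParams params) : filter_params(params) {}

    void apply_filter(const cv::Mat& original, cv::Mat& filtered) const override {
        if (prev.empty() || prev.size() != original.size() ||
            prev.type() != original.type()) {
            original.copyTo(filtered);  // first frame: pass through unchanged
        } else {
            // Blend the current frame with the cached previous frame
            cv::addWeighted(original, 1.0, prev, filter_params.weight, 0.0, filtered);
        }
        prev = original.clone();  // update the state kept between runs
    }

   private:
    FrameDiffParams filter_params;
    mutable cv::Mat prev;  // cached previous frame
};
```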
-### Step 5: Declare and Assign Parameters
-Declare the new filter parameters in the ROS 2 node constructor and assign these parameters to the `FilterParams` structure within the `set_filter_params` function.
-#### In the Node Constructor
+### Step 5: Create the Filter Function

-In the constructor of your ROS 2 node, declare each of the new filter parameters using the `declare_parameter` function. This sets the default values and prepares the node to accept these parameters at runtime through command line or a YAML configuration file.
+Implement your filter function in [image_processing.cpp](image-filtering/src/image_processing.cpp). It takes the `cv::Mat` objects for the original image and the filtered image, and writes the result into the filtered one.

```cpp
-ImageFilteringNode::ImageFilteringNode() : Node("image_filtering_node")
-{
-    this->declare_parameter("filter_params.your_filter.example_param", "default_value");
-    ...
-    // Other parameters declarations
+void Example::apply_filter(const cv::Mat& original, cv::Mat& filtered) const {
+    std::string example_str = this->filter_params.example_string;
+    int example_int = this->filter_params.example_int;
+    apply_example(original, filtered, example_str, example_int);
 }
```
If you need a helper function, go to the [Adding Helper Functions](#adding-helper-functions) section of this page.

-#### In the set_filter_params Function
+### Step 6: Add to the Config File

-In the set_filter_params function, retrieve and assign the parameters to the corresponding fields in the FilterParams structure. Ensure to handle cases where the parameter might not be set or provided.
+Add your filter and its parameters to [image_filtering_params.yaml](image-filtering/config/image_filtering_params.yaml) so the filter can be configured without recompiling:

-```cpp
+```yaml
+  filter_params:
+    filter_type: "example"
+
+    flip:
+      flip_code: 1
+    ...
+    # Add your filter type here
+
+    example:
+      example_int: 5
+      example_string: "This is an example"
+```

+### Step 7: Declare and Assign Parameters
-
-void ImageFilteringNode::set_filter_params(){
-    FilterParams params = filter_params_;  // assuming filter_params_ is already defined in your class
+In the constructor of your ROS 2 node, declare each of the new filter parameters using the `declare_parameter` function in [image_filtering_ros.cpp](image-filtering/src/image_filtering_ros.cpp). This declares the ROS 2 parameters and prepares the node to accept them at runtime through the command line or the YAML configuration file.
+
+```cpp
+void ImageFilteringNode::declare_parameters() {
+    // Declare your parameters here
+    this->declare_parameter("filter_params.example.example_int");
+    this->declare_parameter("filter_params.example.example_string");
+}
+```

-    params.your_filter.example_param = this->get_parameter("filter_params.your_filter.example_param").as_string();
+Then, in the same file, add a new case to `set_filter_params` for your filter that reads the parameters you just declared and constructs the filter.
+```cpp
+void ImageFilteringNode::set_filter_params() {
     ...
-    // Retrieve other parameters and handle cases where parameters might not be provided
-    filter_params_ = params;  // Update the filter parameters structure
-    RCLCPP_INFO(this->get_logger(), "Filter parameters updated for your_filter.");
+    switch (filter_type) {
+        ...
+        // Add case here
+        case FilterType::Example: {
+            ExampleParams params;
+            params.example_int =
+                this->get_parameter("filter_params.example.example_int").as_int();
+            params.example_string =
+                this->get_parameter("filter_params.example.example_string").as_string();
+
+            filter_ptr = std::make_unique<Example>(params);
+            break;
+        }
+    }
 }
```

+#### Adding Helper Functions
+
+If you need helper functions for your filter, add the declaration to [utilities.hpp](image-filtering/include/image_filters/utilities.hpp) and the definition to [utilities.cpp](image-filtering/src/utilities.cpp). `// TODO(New filter)` comments mark where to add them, and the utilities header is already included by the image-processing files. The sketch below shows how the registered filter is looked up and applied once all of these steps are done.
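To make the flow concrete, here is a minimal standalone sketch of the path a frame takes once a filter is registered: the YAML string is parsed to a `FilterType`, the matching `Filter` subclass is constructed, and `apply_filter` is called on each image. The `main` program, the image file names, and the hard-coded parameter values are purely illustrative; inside the node this is done by `set_filter_params()` and `image_callback()`.

```cpp
// Illustrative only: exercises the Filter interface outside the ROS 2 node.
#include <memory>
#include <opencv2/opencv.hpp>
#include "image_filters/image_processing.hpp"

int main() {
    // "example" would normally come from filter_params.filter_type in the YAML file.
    FilterType type = parse_filter_type("example");
    if (type == FilterType::Unknown) {
        return 1;  // unknown string; the node instead falls back to NoFilter
    }

    // Values that set_filter_params() would normally read via get_parameter().
    ExampleParams params;
    params.example_int = 5;
    params.example_string = "This is an example";
    std::unique_ptr<Filter> filter = std::make_unique<Example>(params);

    cv::Mat original = cv::imread("input.png", cv::IMREAD_GRAYSCALE);  // hypothetical input image
    cv::Mat filtered;
    filter->apply_filter(original, filtered);  // polymorphic dispatch, as in image_callback()
    cv::imwrite("filtered.png", filtered);
    return 0;
}
```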
diff --git a/image-filtering/CMakeLists.txt b/image-filtering/CMakeLists.txt index c29732d..e7e2440 100644 --- a/image-filtering/CMakeLists.txt +++ b/image-filtering/CMakeLists.txt @@ -25,6 +25,7 @@ set(LIB_NAME "${PROJECT_NAME}_component") add_library(${LIB_NAME} SHARED src/image_processing.cpp src/image_filtering_ros.cpp + src/utilities.cpp ) target_link_libraries(${LIB_NAME} PUBLIC diff --git a/image-filtering/config/image_filtering_params.yaml b/image-filtering/config/image_filtering_params.yaml index d5272f7..b69cf75 100644 --- a/image-filtering/config/image_filtering_params.yaml +++ b/image-filtering/config/image_filtering_params.yaml @@ -1,10 +1,12 @@ /**: ros__parameters: - sub_topic: "/downwards_camera/image_raw" + sub_topic: "/fls_publisher/display_mono" pub_topic: "/filtered_image" + input_encoding: "mono8" output_encoding: "mono8" + filter_params: - filter_type: "otsu" + filter_type: "example" flip: flip_code: 1 unsharpening: @@ -12,9 +14,9 @@ erosion: size: 1 dilation: - size: 1 + size: 3 white_balancing: - contrast_percentage: 0.1 + contrast_percentage: 10.0 ebus: erosion_size: 2 blur_size: 30 @@ -26,8 +28,24 @@ gamma_auto_correction: true gamma_auto_correction_weight: 4.0 otsu_segmentation: true - erosion_size: 10 - dilation_size: 10 + erosion_size: 2 + dilation_size: 2 + overlap: + percentage_threshold: 20.0 # Percentage (0-100) to cap the pixel intensity difference + median_binary: # finds the median of each n x n square around each pixel + kernel_size: 3 # must be odd >= 1 + threshold: 100 # [0, 255] + invert: false + binary: + threshold: 20. # in percent + maxval: 255. + invert: true + + # TODO(New filter): add your filterparameters here + example: + example_int: 1 + example_string: "Get filtered" + # Filter params should reflect the FilterParams struct # defined in /include/image_filters/image_processing.hpp diff --git a/image-filtering/include/image_filters/image_filtering_ros.hpp b/image-filtering/include/image_filters/image_filtering_ros.hpp index 4712d7a..fcc6a08 100644 --- a/image-filtering/include/image_filters/image_filtering_ros.hpp +++ b/image-filtering/include/image_filters/image_filtering_ros.hpp @@ -3,6 +3,8 @@ #include #include +#include +#include #include #include #include @@ -11,6 +13,7 @@ #include #include "image_processing.hpp" + class ImageFilteringNode : public rclcpp::Node { public: explicit ImageFilteringNode(const rclcpp::NodeOptions& options); @@ -99,16 +102,10 @@ class ImageFilteringNode : public rclcpp::Node { std::string image_topic_; /** - * @brief The filter parameters - * - */ - FilterParams filter_params_; - - /** - * @brief filter to apply + * @brief Pointer to the filter object * */ - std::string filter_; + std::unique_ptr filter_ptr; }; #endif // IMAGE_FILTERS__IMAGE_FILTERING_ROS_HPP_ diff --git a/image-filtering/include/image_filters/image_filtering_ros_utils.hpp b/image-filtering/include/image_filters/image_filtering_ros_utils.hpp new file mode 100644 index 0000000..eae73c4 --- /dev/null +++ b/image-filtering/include/image_filters/image_filtering_ros_utils.hpp @@ -0,0 +1,3 @@ +// FilterType ImageFilteringNode::create_binary_filter(){ + +// } \ No newline at end of file diff --git a/image-filtering/include/image_filters/image_processing.hpp b/image-filtering/include/image_filters/image_processing.hpp index 59e9bf5..3b50e83 100644 --- a/image-filtering/include/image_filters/image_processing.hpp +++ b/image-filtering/include/image_filters/image_processing.hpp @@ -9,34 +9,227 @@ #include #include #include +#include +#include #include 
+#include -struct FlipParams { - int flip_code; +enum class FilterType { // TODO(New filter): Add filters here + NoFilter, + Flip, + Unsharpening, + Erosion, + Dilation, + WhiteBalancing, + Ebus, + Otsu, + Overlap, + MedianBinary, + Binary, + + Example, + Unknown +}; + +static constexpr std::pair kFilterMap[] = { + {"no_filter", FilterType::NoFilter}, + {"flip", FilterType::Flip}, + {"unsharpening", FilterType::Unsharpening}, + {"erosion", FilterType::Erosion}, + {"dilation", FilterType::Dilation}, + {"white_balancing", FilterType::WhiteBalancing}, + {"ebus", FilterType::Ebus}, + {"otsu", FilterType::Otsu}, + {"overlap", FilterType::Overlap}, + {"median_binary", FilterType::MedianBinary}, + {"binary", FilterType::Binary}, + + // TODO(New filter): Also add your filter here + {"example", FilterType::Example}, + {"unknown", FilterType::Unknown}}; + +inline std::string to_lower(std::string s) { + for (char& ch : s) { + ch = static_cast(std::tolower(static_cast(ch))); + } + return s; +} + +inline FilterType parse_filter_type(std::string s) { + s = to_lower(std::move(s)); + + for (auto [name, type] : kFilterMap) { + if (s == name) + return type; + } + spdlog::warn(fmt::format(fmt::fg(fmt::rgb(200, 180, 50)), "No string connected to that filter type: '{}'. This might be misspelling or you need to add the filter type to kFilterMap in image_processing.hpp", s)); + return FilterType::Unknown; +} + +inline std::string_view filtertype_to_string(FilterType t) { + for (auto [name, type] : kFilterMap) { + if (t == type) + return name; + } + spdlog::warn(fmt::format(fmt::fg(fmt::rgb(200, 180, 50)), " No string connected to your filter type. To fix " + "this add the string and filter type to kFilterMap")); + return "unknown"; +} + +class Filter { + public: + virtual ~Filter() = default; + virtual void apply_filter(const cv::Mat& original, + cv::Mat& filtered) const = 0; + + protected: + Filter() = default; +}; + +///////////////////////////// +// No filter +///////////////////////////// + +struct NoFilterParams {}; + +class NoFilter : public Filter { + public: + void apply_filter(const cv::Mat& original, + cv::Mat& filtered) const override { + original.copyTo(filtered); + }; }; + +//////////////////////////// +// Unsharpening +///////////////////////////// + struct UnsharpeningParams { int blur_size; }; -struct ErodingParams { - int size; +class Unsharpening : public Filter { + public: + explicit Unsharpening(UnsharpeningParams params) : filter_params(params) {} + void apply_filter(const cv::Mat& original, + cv::Mat& filtered) const override; + + private: + UnsharpeningParams filter_params; +}; + +///////////////////////////// +// Sharpening +///////////////////////////// + +struct FlipParams { + int flip_code; +}; + +class Flip : public Filter { + public: + explicit Flip(FlipParams params) : filter_params(params) {} + void apply_filter(const cv::Mat& original, + cv::Mat& filtered) const override; + + private: + FlipParams filter_params; +}; + +///////////////////////////// +// Sharpening +///////////////////////////// + +struct SharpeningParams {}; + +class Sharpening : public Filter { + public: + explicit Sharpening(SharpeningParams params) : filter_params(params) {} + void apply_filter(const cv::Mat& original, + cv::Mat& filtered) const override; + + private: + SharpeningParams filter_params; +}; +///////////////////////////// +// Erosion +///////////////////////////// + +struct ErosionParams { + int kernel_size; // odd > 1 +}; + +class Erosion : public Filter { + public: + explicit 
Erosion(ErosionParams params) : filter_params(params) {} + void apply_filter(const cv::Mat& original, + cv::Mat& filtered) const override; + + private: + ErosionParams filter_params; +}; + +///////////////////////////// +// Dilation +///////////////////////////// + +struct DilationParams { + int kernel_size = 3; }; -struct DilatingParams { - int size; +class Dilation : public Filter { + public: + explicit Dilation(DilationParams params) : filter_params(params) {} + void apply_filter(const cv::Mat& original, + cv::Mat& filtered) const override; + + private: + DilationParams filter_params; }; -struct WhiteBalancingParams { +///////////////////////////// +// White Balance +///////////////////////////// + +struct WhiteBalanceParams { double contrast_percentage; }; +class WhiteBalance : public Filter { + public: + explicit WhiteBalance(WhiteBalanceParams params) : filter_params(params) {} + void apply_filter(const cv::Mat& original, + cv::Mat& filtered) const override; + + private: + WhiteBalanceParams filter_params; +}; + +///////////////////////////// +// Ebus (dilation + unsharpening combo) +///////////////////////////// + struct EbusParams { int erosion_size; int blur_size; int mask_weight; }; -struct OtsuParams { +class Ebus : public Filter { + public: + explicit Ebus(EbusParams params) : filter_params(params) {} + void apply_filter(const cv::Mat& original, + cv::Mat& filtered) const override; + + private: + EbusParams filter_params; +}; + +///////////////////////////// +// Otsu Segmentation +///////////////////////////// + +struct OtsuSegmentationParams { bool gamma_auto_correction; double gamma_auto_correction_weight; bool otsu_segmentation; @@ -47,100 +240,100 @@ struct OtsuParams { int dilation_size; }; -struct FilterParams { - FlipParams flip; - UnsharpeningParams unsharpening; - ErodingParams eroding; - DilatingParams dilating; - WhiteBalancingParams white_balancing; - EbusParams ebus; - OtsuParams otsu; -}; - -typedef void (*FilterFunction)(const FilterParams&, const cv::Mat&, cv::Mat&); - -/** - * Reads the filter_type from the FilterParams struct - * and calls the appropriate filter function from the filter_functions map. 
- */ -void apply_filter(const std::string& filter, - const FilterParams& params, - const cv::Mat& original, - cv::Mat& filtered); - -/** - * No filter, just copy the image - */ -void no_filter(const FilterParams& params, - const cv::Mat& original, - cv::Mat& modified); - -/** - * Flips the image - */ -void flip_filter(const FilterParams& params, - const cv::Mat& original, - cv::Mat& modified); - -/** - * Makes edges harder - */ -void sharpening_filter(const FilterParams& params, - const cv::Mat& original, - cv::Mat& modified); - -/** - * Makes edges harder in a smarter way - */ -void unsharpening_filter(const FilterParams& params, - const cv::Mat& original, - cv::Mat& modified); - -/** - * Expands the dark areas of the image - */ -void erosion_filter(const FilterParams& params, - const cv::Mat& original, - cv::Mat& modified); - -/** - * Expands the bright areas of the image - */ -void dilation_filter(const FilterParams& params, - const cv::Mat& original, - cv::Mat& modified); - -/** - * White Balancing Filter - */ -void white_balance_filter(const FilterParams& params, - const cv::Mat& original, - cv::Mat& filtered); - -/** - * A filter that worked well-ish in the mc-lab conditions easter 2023 - * Uses a combination of dilation and unsharpening - */ -void ebus_filter(const FilterParams& params, - const cv::Mat& original, - cv::Mat& filtered); - -/** - * A filter based on Otsu's method - */ -void otsu_segmentation_filter(const FilterParams& params, - const cv::Mat& original, - cv::Mat& output); - -static const std::map filter_functions = { - {"no_filter", no_filter}, - {"flip", flip_filter}, - {"sharpening", sharpening_filter}, - {"unsharpening", unsharpening_filter}, - {"erosion", erosion_filter}, - {"dilation", dilation_filter}, - {"white_balancing", white_balance_filter}, - {"ebus", ebus_filter}, - {"otsu", otsu_segmentation_filter}}; +class OtsuSegmentation : public Filter { + public: + explicit OtsuSegmentation(OtsuSegmentationParams params) + : filter_params(params) {} + void apply_filter(const cv::Mat& original, cv::Mat& output) const override; + + private: + OtsuSegmentationParams filter_params; +}; + +///////////////////////////// +// Overlap (blend/composite) +///////////////////////////// + +struct OverlapParams { + double percentage_threshold; // 0..100 (percent) +}; + +class Overlap : public Filter { + public: + explicit Overlap(OverlapParams params) + : filter_params(params), has_prev(false) {} + + void apply_filter(const cv::Mat& original, + cv::Mat& filtered) const override; + + private: + OverlapParams filter_params; + + // Cached previous mono8 frame + mutable cv::Mat prev; + mutable bool has_prev; +}; + +///////////////////////////// +// Median + Binary +///////////////////////////// + +struct MedianBinaryParams { + int kernel_size; + int threshold; + bool invert; +}; + +class MedianBinary : public Filter { + public: + explicit MedianBinary(MedianBinaryParams params) : filter_params(params) {} + void apply_filter(const cv::Mat& original, + cv::Mat& filtered) const override; + + private: + MedianBinaryParams filter_params; +}; + +///////////////////////////// +// Binary Threshold +///////////////////////////// + +struct BinaryThresholdParams { + double threshold; + double maxval; + bool invert; +}; + +class BinaryThreshold : public Filter { + public: + explicit BinaryThreshold(BinaryThresholdParams params) + : filter_params(params) {} + void apply_filter(const cv::Mat& original, + cv::Mat& filtered) const override; + + private: + BinaryThresholdParams filter_params; +}; + 
+// TODO(New filter): add this structure for your filter + +///////////////////////////// +// Example +///////////////////////////// + +// Example: +struct ExampleParams { // Add filter variables here + int example_int; + std::string example_string; +}; + +class Example : public Filter { + public: + explicit Example(ExampleParams params) : filter_params(params) {} + void apply_filter(const cv::Mat& original, cv::Mat& filtered) + const override; // This is the filter itself + private: + ExampleParams filter_params; +}; #endif // IMAGE_FILTERS__IMAGE_PROCESSING_HPP_ diff --git a/image-filtering/include/image_filters/utilities.hpp b/image-filtering/include/image_filters/utilities.hpp new file mode 100644 index 0000000..2ad41f8 --- /dev/null +++ b/image-filtering/include/image_filters/utilities.hpp @@ -0,0 +1,70 @@ +#ifndef IMAGE_FILTERS__UTILITIES_HPP_ +#define IMAGE_FILTERS__UTILITIES_HPP_ + +#include +#include +#include +#include +#include +#include +#include + +// Auto-choose a gamma so dark images get lifted and bright images get toned +// down (expects mono8) +void apply_auto_gamma(cv::Mat& image, double correction_weight); + +// Converts a BGR image to grayscale using custom channel weights (wB, wG, wR). +void to_weighted_gray(const cv::Mat& bgr, + cv::Mat& gray, + double wB, + double wG, + double wR); + +// Applies Otsu’s automatic thresholding and returning the threshold +int apply_otsu(const cv::Mat& gray8u, + cv::Mat& out, + bool invert = false, + double maxval = 255.0); + +// Performs morphological erosion (shrinks bright regions / removes small white +// noise) +void apply_erosion(const cv::Mat& src, + cv::Mat& dst, + int size, + int shape = cv::MORPH_RECT); + +// Performs morphological dilation (grows bright regions / fills small holes) +// opposite of erosion +void apply_dilation(const cv::Mat& src, + cv::Mat& dst, + int size, + int shape = cv::MORPH_RECT); + +// Applies a median blur with `kernel_size` to reduce salt-and-pepper noise +// while preserving edges. +void apply_median(const cv::Mat& original, cv::Mat& filtered, int kernel_size); + +// Apply a fixed binary threshold. +// - Accepts grayscale or color input (auto-converts to gray). +// - Ensures 8-bit depth for thresholding. +// - Returns a 0/255 mask (CV_8U). +// - Set `invert=true` to get white background & black foreground. 
+void apply_fixed_threshold(const cv::Mat& img, + cv::Mat& filtered, + int thresh, + bool invert = false); + +// This does not work properly (Chat has failed me) +void distance_field(const cv::Mat& binObstacles, + cv::Mat& dist, + bool obstaclesAreWhite = true, + int type = cv::DIST_L2, + int maskSize = 3); // DIST_L2 is normal euclidean + +// TODO(New filter): If you need a helper function have the signature here +void apply_example(const cv::Mat& original, + cv::Mat& filtered, + std::string example_string, + int example_int); + +#endif // IMAGE_FILTERS__UTILITIES_HPP_ diff --git a/image-filtering/package.xml b/image-filtering/package.xml index 31aabc9..8e40162 100644 --- a/image-filtering/package.xml +++ b/image-filtering/package.xml @@ -4,7 +4,7 @@ image_filtering 0.0.1 The image filtering package - Jorgen Fjermedal + Thomas Paulen MIT ament_cmake diff --git a/image-filtering/src/image_filtering_ros.cpp b/image-filtering/src/image_filtering_ros.cpp index 4dd53bf..3594f05 100644 --- a/image-filtering/src/image_filtering_ros.cpp +++ b/image-filtering/src/image_filtering_ros.cpp @@ -23,6 +23,7 @@ void ImageFilteringNode::declare_parameters() { this->declare_parameter("sub_topic"); this->declare_parameter("pub_topic"); this->declare_parameter("output_encoding"); + this->declare_parameter("input_encoding"); this->declare_parameter("filter_params.filter_type"); this->declare_parameter("filter_params.flip.flip_code"); this->declare_parameter("filter_params.unsharpening.blur_size"); @@ -42,57 +43,208 @@ void ImageFilteringNode::declare_parameters() { this->declare_parameter("filter_params.otsu.gsc_weight_b"); this->declare_parameter("filter_params.otsu.erosion_size"); this->declare_parameter("filter_params.otsu.dilation_size"); + + this->declare_parameter( + "filter_params.overlap.percentage_threshold"); + + this->declare_parameter("filter_params.binary.threshold"); + this->declare_parameter("filter_params.binary.maxval"); + this->declare_parameter("filter_params.binary.invert"); + + this->declare_parameter("filter_params.median_binary.kernel_size"); + this->declare_parameter("filter_params.median_binary.threshold"); + this->declare_parameter("filter_params.median_binary.invert"); + + // TODO(New filter): Declare parameters set for your filter here + this->declare_parameter("filter_params.example.example_int"); + this->declare_parameter( + "filter_params.example.example_string"); } void ImageFilteringNode::set_filter_params() { - FilterParams params; - std::string filter = + std::string filter_type_string = this->get_parameter("filter_params.filter_type").as_string(); - if (!filter_functions.contains(filter)) { - spdlog::warn( - "Invalid filter type received: {}. 
Using default: no_filter.", - filter); - filter_ = "no_filter"; - } else { - filter_ = filter; + FilterType filter_type = parse_filter_type(filter_type_string); + + switch (filter_type) { + + case FilterType::NoFilter: { + filter_ptr = std::make_unique(); + break; + } + case FilterType::Unsharpening: { + UnsharpeningParams params; + params.blur_size = + this->get_parameter("filter_params.unsharpening.blur_size") + .as_int(); + + filter_ptr = std::make_unique(params); + break; + } + + case FilterType::Flip: { + FlipParams params; + params.flip_code = + this->get_parameter("filter_params.flip.flip_code").as_int(); + + filter_ptr = std::make_unique(params); + break; + } + + case FilterType::Erosion: { + ErosionParams params; + params.kernel_size = + this->get_parameter("filter_params.erosion.size").as_int(); + + filter_ptr = std::make_unique(params); + break; + } + + case FilterType::Dilation: { + DilationParams params; + params.kernel_size = + this->get_parameter("filter_params.dilation.size").as_int(); + + filter_ptr = std::make_unique(params); + break; + } + + case FilterType::WhiteBalancing: { + WhiteBalanceParams params; + params.contrast_percentage = + this->get_parameter( + "filter_params.white_balancing.contrast_percentage") + .as_double(); + + filter_ptr = std::make_unique(params); + break; + } + + case FilterType::Ebus: { + EbusParams params; + params.erosion_size = + this->get_parameter("filter_params.ebus.erosion_size").as_int(); + params.blur_size = + this->get_parameter("filter_params.ebus.blur_size").as_int(); + params.mask_weight = + this->get_parameter("filter_params.ebus.mask_weight").as_int(); + + filter_ptr = std::make_unique(params); + break; + } + + case FilterType::Otsu: { + OtsuSegmentationParams params; + params.gamma_auto_correction = + this->get_parameter("filter_params.otsu.gamma_auto_correction") + .as_bool(); + params.gamma_auto_correction_weight = + this->get_parameter( + "filter_params.otsu.gamma_auto_correction_weight") + .as_double(); + params.otsu_segmentation = + this->get_parameter("filter_params.otsu.otsu_segmentation") + .as_bool(); + params.gsc_weight_r = + this->get_parameter("filter_params.otsu.gsc_weight_r") + .as_double(); + params.gsc_weight_g = + this->get_parameter("filter_params.otsu.gsc_weight_g") + .as_double(); + params.gsc_weight_b = + this->get_parameter("filter_params.otsu.gsc_weight_b") + .as_double(); + params.erosion_size = + this->get_parameter("filter_params.otsu.erosion_size").as_int(); + params.dilation_size = + this->get_parameter("filter_params.otsu.dilation_size") + .as_int(); + + filter_ptr = std::make_unique(params); + break; + } + + case FilterType::Overlap: { + OverlapParams params; + params.percentage_threshold = + this->get_parameter( + "filter_params.overlap.percentage_threshold") + .as_double(); + + filter_ptr = std::make_unique(params); + break; + } + + case FilterType::MedianBinary: { + MedianBinaryParams params; + params.kernel_size = + this->get_parameter("filter_params.median_binary.kernel_size") + .as_int(); + params.threshold = + this->get_parameter("filter_params.median_binary.threshold") + .as_int(); + params.invert = + this->get_parameter("filter_params.median_binary.invert") + .as_bool(); + + filter_ptr = std::make_unique(params); + break; + } + + case FilterType::Binary: { + BinaryThresholdParams params; + params.threshold = + this->get_parameter("filter_params.binary.threshold") + .as_double(); + params.maxval = + this->get_parameter("filter_params.binary.maxval").as_double(); + params.invert = + 
this->get_parameter("filter_params.binary.invert").as_bool(); + + filter_ptr = std::make_unique(params); + break; + } + + // TODO(New filter): Add your filter case here: + case FilterType::Example: { + ExampleParams params; + params.example_int = + this->get_parameter("filter_params.example.example_int") + .as_int(); + params.example_string = + this->get_parameter("filter_params.example.example_string") + .as_string(); + + filter_ptr = std::make_unique(params); + break; + } + + default:; + if (filter_type == FilterType::Unknown){ + spdlog::warn(fmt::format(fmt::fg(fmt::rgb(200, 180, 50)), + "Invalid filter type received: {}. Using default: no_filter.", + filter_type_string)); + } + else{ + spdlog::warn(fmt::format(fmt::fg(fmt::rgb(200, 180, 50)), + "Filterparams has not been set for your chosen filter " + "{}. " + "To fix this add your filter to " + "ImageFilteringNode::set_filter_params(). " + "Using default: no_filter.", + filter_type_string)); + } + + filter_ptr = std::make_unique(); + filter_type = FilterType::NoFilter; + + return; } - params.flip.flip_code = - this->get_parameter("filter_params.flip.flip_code").as_int(); - params.unsharpening.blur_size = - this->get_parameter("filter_params.unsharpening.blur_size").as_int(); - params.eroding.size = - this->get_parameter("filter_params.erosion.size").as_int(); - params.dilating.size = - this->get_parameter("filter_params.dilation.size").as_int(); - params.white_balancing.contrast_percentage = - this->get_parameter("filter_params.white_balancing.contrast_percentage") - .as_double(); - params.ebus.erosion_size = - this->get_parameter("filter_params.ebus.erosion_size").as_int(); - params.ebus.blur_size = - this->get_parameter("filter_params.ebus.blur_size").as_int(); - params.ebus.mask_weight = - this->get_parameter("filter_params.ebus.mask_weight").as_int(); - params.otsu.gamma_auto_correction = - this->get_parameter("filter_params.otsu.gamma_auto_correction") - .as_bool(); - params.otsu.gamma_auto_correction_weight = - this->get_parameter("filter_params.otsu.gamma_auto_correction_weight") - .as_double(); - params.otsu.otsu_segmentation = - this->get_parameter("filter_params.otsu.otsu_segmentation").as_bool(); - params.otsu.gsc_weight_r = - this->get_parameter("filter_params.otsu.gsc_weight_r").as_double(); - params.otsu.gsc_weight_g = - this->get_parameter("filter_params.otsu.gsc_weight_g").as_double(); - params.otsu.gsc_weight_b = - this->get_parameter("filter_params.otsu.gsc_weight_b").as_double(); - params.otsu.erosion_size = - this->get_parameter("filter_params.otsu.erosion_size").as_int(); - params.otsu.dilation_size = - this->get_parameter("filter_params.otsu.dilation_size").as_int(); - filter_params_ = params; - spdlog::info("Filter parameters set: {}", filter); + + + + spdlog::info(fmt::format(fmt::fg(fmt::rgb(31, 161, 221)),"Using filter: {}", + filter_type_string)); } void ImageFilteringNode::check_and_subscribe_to_image_topic() { @@ -142,8 +294,15 @@ void ImageFilteringNode::image_callback( const sensor_msgs::msg::Image::SharedPtr msg) { cv_bridge::CvImagePtr cv_ptr; + std::string input_encoding = + this->get_parameter("input_encoding").as_string(); + + if (input_encoding.empty()) { + input_encoding = msg->encoding; // Default to the input image encoding + } + try { - cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8); + cv_ptr = cv_bridge::toCvCopy(msg, input_encoding); if (cv_ptr->image.empty()) { spdlog::error("Received empty image, skipping processing."); @@ -151,12 +310,23 @@ void 
ImageFilteringNode::image_callback( } } catch (cv_bridge::Exception& e) { spdlog::error("cv_bridge exception: {}", e.what()); - return; + ; } cv::Mat input_image = cv_ptr->image; cv::Mat filtered_image; - apply_filter(filter_, filter_params_, input_image, filtered_image); + + try{ + filter_ptr->apply_filter(input_image, filtered_image); + } + catch (const cv::Exception& e) { + spdlog::error(fmt::format(fmt::fg(fmt::rgb(31, 161, 221)), "OpenCV error while applying filter: {}", e.what())); + filtered_image = input_image.clone(); // fallback to no filter + } + catch (const std::exception& e) { + spdlog::error(fmt::format(fmt::fg(fmt::rgb(31, 161, 221)),"Error while applying filter: {}", e.what())); + filtered_image = input_image.clone(); + } std::string output_encoding = this->get_parameter("output_encoding").as_string(); diff --git a/image-filtering/src/image_filtering_ros_utils.cpp b/image-filtering/src/image_filtering_ros_utils.cpp new file mode 100644 index 0000000..e69de29 diff --git a/image-filtering/src/image_processing.cpp b/image-filtering/src/image_processing.cpp index 00b4aec..a3c3a98 100644 --- a/image-filtering/src/image_processing.cpp +++ b/image-filtering/src/image_processing.cpp @@ -1,31 +1,23 @@ #include +#include #include -void no_filter([[maybe_unused]] const FilterParams& params, - const cv::Mat& original, - cv::Mat& filtered) { - original.copyTo(filtered); -} - -void flip_filter([[maybe_unused]] const FilterParams& params, - const cv::Mat& original, - cv::Mat& filtered) { - int flip_code = params.flip.flip_code; // 0: x-axis, 1: y-axis, -1: both +void Flip::apply_filter(const cv::Mat& original, cv::Mat& filtered) const { + int flip_code = + this->filter_params.flip_code; // 0: x-axis, 1: y-axis, -1: both cv::flip(original, filtered, flip_code); } -void sharpening_filter([[maybe_unused]] const FilterParams& params, - const cv::Mat& original, - cv::Mat& filtered) { +void Sharpening::apply_filter(const cv::Mat& original, + cv::Mat& filtered) const { // Sharpen image cv::Mat kernel = (cv::Mat_(3, 3) << 0, -1, 0, -1, 5, -1, 0, -1, 0); cv::filter2D(original, filtered, -1, kernel); } -void unsharpening_filter(const FilterParams& params, - const cv::Mat& original, - cv::Mat& filtered) { - int blur_size = params.unsharpening.blur_size; +void Unsharpening::apply_filter(const cv::Mat& original, + cv::Mat& filtered) const { + int blur_size = this->filter_params.blur_size; // Create a blurred version of the image cv::Mat blurred; GaussianBlur(original, blurred, @@ -38,57 +30,32 @@ void unsharpening_filter(const FilterParams& params, addWeighted(original, 1, mask, 3, 0, filtered); } -void erosion_filter(const FilterParams& params, - const cv::Mat& original, - cv::Mat& filtered) { - int erosion_size = params.eroding.size; - // Create a structuring element for dilation and erosion - cv::Mat element = cv::getStructuringElement( - cv::MORPH_RECT, cv::Size(2 * erosion_size + 1, 2 * erosion_size + 1), - cv::Point(erosion_size, erosion_size)); - - // Apply erosion to the image - cv::erode(original, filtered, element); +void Erosion::apply_filter(const cv::Mat& original, cv::Mat& filtered) const { + apply_erosion(original, filtered, this->filter_params.kernel_size, + cv::MORPH_RECT); } -void dilation_filter(const FilterParams& params, - const cv::Mat& original, - cv::Mat& filtered) { - int dilation_size = params.dilating.size; - // Create a structuring element for dilation and erosion - cv::Mat element = cv::getStructuringElement( - cv::MORPH_RECT, cv::Size(2 * dilation_size + 1, 2 * 
dilation_size + 1), - cv::Point(dilation_size, dilation_size)); - - // Apply dilation to the image - cv::dilate(original, filtered, element); +void Dilation::apply_filter(const cv::Mat& original, cv::Mat& filtered) const { + apply_dilation(original, filtered, this->filter_params.kernel_size, + cv::MORPH_RECT); } -void white_balance_filter(const FilterParams& params, - const cv::Mat& original, - cv::Mat& filtered) { - double contrast_percentage = params.white_balancing.contrast_percentage; +void WhiteBalance::apply_filter(const cv::Mat& original, + cv::Mat& filtered) const { + double contrast_percentage = this->filter_params.contrast_percentage; cv::Ptr balance = cv::xphoto::createSimpleWB(); balance->setP(contrast_percentage); balance->balanceWhite(original, filtered); } -void ebus_filter(const FilterParams& params, - const cv::Mat& original, - cv::Mat& filtered) { - int blur_size = params.ebus.blur_size; - int mask_weight = params.ebus.mask_weight; +void Ebus::apply_filter(const cv::Mat& original, cv::Mat& filtered) const { + int blur_size = this->filter_params.blur_size; + int mask_weight = this->filter_params.mask_weight; + int erosion_size = this->filter_params.erosion_size; // Erode image to make blacks more black cv::Mat eroded; - int erosion_size = params.eroding.size; - // Create a structuring element for dilation and erosion - cv::Mat element = cv::getStructuringElement( - cv::MORPH_RECT, cv::Size(2 * erosion_size + 1, 2 * erosion_size + 1), - cv::Point(erosion_size, erosion_size)); - - // Apply erosion to the image - cv::erode(original, eroded, element); + apply_erosion(original, eroded, erosion_size); // Make an unsharp mask from original image cv::Mat blurred; @@ -104,145 +71,116 @@ void ebus_filter(const FilterParams& params, addWeighted(eroded, 1, mask, mask_weight, 0, filtered); } -void applyGammaCorrection(cv::Mat& image, double gamma) { - // Create a lookup table for gamma correction - cv::Mat lookup(1, 256, CV_8U); - uchar* p = lookup.ptr(); - for (int i = 0; i < 256; ++i) { - p[i] = cv::saturate_cast(pow(i / 255.0, gamma) * 255.0); - } +void OtsuSegmentation::apply_filter(const cv::Mat& original, + cv::Mat& filtered) const { + bool gamma_auto_correction = this->filter_params.gamma_auto_correction; + double gamma_auto_correction_weight = + this->filter_params.gamma_auto_correction_weight; - // Apply the gamma correction using the lookup table - cv::LUT(image, lookup, image); -} + bool otsu_segmentation = this->filter_params.otsu_segmentation; -double calculateAutoGamma(const cv::Mat& image) { - // Convert the image to grayscale if it's not already - cv::Mat grayImage; - if (image.channels() == 3) { - cv::cvtColor(image, grayImage, cv::COLOR_BGR2GRAY); - } else { - grayImage = image; + if (original.type() == CV_8UC3) { // if the image type is bgr8 + to_weighted_gray(original, filtered, this->filter_params.gsc_weight_b, + this->filter_params.gsc_weight_g, + this->filter_params.gsc_weight_r); + } else if (original.type() == CV_8UC1) { + original.copyTo(filtered); + } // if its mono8 + else + spdlog::error("your image type is not matching this filter"); + + if (gamma_auto_correction) { + apply_auto_gamma(filtered, gamma_auto_correction_weight); } - // Calculate the mean intensity - cv::Scalar meanIntensity = mean(grayImage); + if (otsu_segmentation) { + apply_otsu(filtered, filtered, false, 255); - // The ideal mean intensity is 128 (midpoint for 8-bit grayscale images) - double idealMean = 128.0; - double currentMean = meanIntensity[0]; + // Apply erosion followed by 
dilation (opening) - // Automatically set gamma value based on the mean intensity - double gamma; - if (currentMean > 0) { - gamma = log(idealMean / 255.0) / log(currentMean / 255.0); - } else { - gamma = 1.0; // Default gamma if the image has no intensity + apply_erosion(filtered, filtered, this->filter_params.erosion_size, + cv::MORPH_CROSS); + apply_dilation(filtered, filtered, this->filter_params.dilation_size, + cv::MORPH_CROSS); } +} - // Ensure gamma is within a reasonable range (e.g., between 0.1 and 3.0) - gamma = std::max(0.1, std::min(gamma, 3.0)); +void Overlap::apply_filter(const cv::Mat& original, cv::Mat& filtered) const { + // mono8 only + CV_Assert(!original.empty()); + CV_Assert(original.type() == CV_8UC1); - return gamma; -} + double thr_percent = filter_params.percentage_threshold; -void otsu_segmentation_filter(const FilterParams& params, - const cv::Mat& original, - cv::Mat& filtered) { - bool gamma_auto_correction = params.otsu.gamma_auto_correction; - double gamma_auto_correction_weight = - params.otsu.gamma_auto_correction_weight; + // First frame (or size/type change): passthrough + cache + if (!has_prev || prev.empty() || prev.size() != original.size() || + prev.type() != original.type()) { + original.copyTo(filtered); + prev = original.clone(); + has_prev = true; + return; + } - bool otsu_segmentation = params.otsu.otsu_segmentation; + // Absolute difference (still mono8) + cv::Mat diff8u; + cv::absdiff(original, prev, diff8u); - cv::Mat grayImage; + // Convert to percent of 8-bit range (0..100) + cv::Mat percent32f; + diff8u.convertTo(percent32f, CV_32F, 100.0 / 255.0); - cv::Matx13f customWeights(params.otsu.gsc_weight_b, - params.otsu.gsc_weight_g, - params.otsu.gsc_weight_r); - cv::transform(original, filtered, customWeights); + // Build mask where change > threshold + cv::Mat mask8u; + cv::threshold(percent32f, mask8u, thr_percent, 255.0, cv::THRESH_BINARY); + mask8u.convertTo(mask8u, CV_8U); - if (gamma_auto_correction) { - double autoGamma = - calculateAutoGamma(filtered) * gamma_auto_correction_weight; + // Output: same as original except zero-out "changed" pixels + filtered = original.clone(); + filtered.setTo(0, mask8u); - applyGammaCorrection(filtered, autoGamma); - } + // Update history (write to cached previous) + prev = original.clone(); +} - if (otsu_segmentation) { - // Calculate the histogram - int histSize = 256; - float range[] = {0, 256}; - const float* histRange = {range}; - cv::Mat hist; - calcHist(&filtered, 1, 0, cv::Mat(), hist, 1, &histSize, &histRange, - true, false); - - // Normalize histogram to get probabilities - hist /= filtered.total(); - - // Initialize variables for Otsu's method - std::vector sigma2_list(256, 0.0); - std::vector p(hist.begin(), - hist.end()); // Probabilities - - for (int th = 1; th < 256; ++th) { - // Calculate omega (weights) for foreground and background - float omega_fg = std::accumulate(p.begin(), p.begin() + th, 0.0f); - float omega_bg = std::accumulate(p.begin() + th, p.end(), 0.0f); - - // Calculate mu (means) for foreground and background - float mu_fg = 0, mu_bg = 0; - for (int i = 0; i < th; ++i) { - mu_fg += i * p[i]; - } - for (int i = th; i < 256; ++i) { - mu_bg += i * p[i]; - } - - if (omega_fg > 0) - mu_fg /= omega_fg; - if (omega_bg > 0) - mu_bg /= omega_bg; - - // Calculate sigma squared and store in list - sigma2_list[th] = omega_fg * omega_bg * pow(mu_fg - mu_bg, 2); - } - - // Find the threshold corresponding to the maximum sigma squared - int optimalThreshold = - 
std::max_element(sigma2_list.begin(), sigma2_list.end()) - - sigma2_list.begin(); - - // Apply the threshold to the image - // cv::Mat binaryImage; - cv::threshold(filtered, filtered, optimalThreshold, 255, - cv::THRESH_BINARY); +void MedianBinary::apply_filter(const cv::Mat& original, + cv::Mat& filtered) const { + apply_median(original, filtered, this->filter_params.kernel_size); + apply_fixed_threshold(filtered, filtered, this->filter_params.threshold, + this->filter_params.invert); +} - // Apply erosion followed by dilation (opening) - cv::Mat openedImage; - int erosionSize = params.otsu.erosion_size; - cv::Mat erosionKernel = getStructuringElement( - cv::MORPH_CROSS, cv::Size(2 * erosionSize + 1, 2 * erosionSize + 1), - cv::Point(erosionSize, erosionSize)); - cv::erode(filtered, filtered, erosionKernel); - - int dilation_size = params.otsu.dilation_size; - cv::Mat dilationKernel = getStructuringElement( - cv::MORPH_CROSS, - cv::Size(2 * dilation_size + 1, 2 * dilation_size + 1), - cv::Point(dilation_size, dilation_size)); - cv::dilate(filtered, filtered, dilationKernel); +void BinaryThreshold::apply_filter(const cv::Mat& original, + cv::Mat& filtered) const { + CV_Assert(!original.empty()); + + const double thresh = this->filter_params.threshold; + const double maxval = this->filter_params.maxval; + const bool invert = this->filter_params.invert; + + // 1) Ensure single-channel + cv::Mat gray; + if (original.channels() == 1) { + gray = original; + } else { + cv::cvtColor(original, gray, cv::COLOR_BGR2GRAY); } -} -void apply_filter(const std::string& filter, - const FilterParams& params, - const cv::Mat& original, - cv::Mat& filtered) { - if (filter_functions.contains(filter)) { - ((filter_functions.at(filter)))(params, original, filtered); + // Standardize to 8-bit (safe for thresholding) + cv::Mat src8; + if (gray.depth() != CV_8U) { + // Adjust scaling here if grayscale is not already 0–255 + gray.convertTo(src8, CV_8U); } else { - original.copyTo(filtered); // Default to no filter + src8 = gray; } + // Apply fixed threshold + const int type = invert ? 
cv::THRESH_BINARY_INV : cv::THRESH_BINARY; + cv::threshold(src8, filtered, thresh, maxval, type); +} + +// TODO(New filter): Implement your filter here +void Example::apply_filter(const cv::Mat& original, cv::Mat& filtered) const { + std::string example_str = this->filter_params.example_string; + int example_int = this->filter_params.example_int; + apply_example(original, filtered, example_str, example_int); } diff --git a/image-filtering/src/utilities.cpp b/image-filtering/src/utilities.cpp new file mode 100644 index 0000000..696d4a6 --- /dev/null +++ b/image-filtering/src/utilities.cpp @@ -0,0 +1,327 @@ +#include +#include + +// Apply a given gamma to an 8-bit image using a LUT +void applyGammaLUT(cv::Mat& image, double gamma) { + // Create a lookup table for gamma correction + cv::Mat lookup(1, 256, CV_8U); + uchar* p = lookup.ptr(); + for (int i = 0; i < 256; ++i) { + p[i] = cv::saturate_cast(pow(i / 255.0, gamma) * 255.0); + } + + // Apply the gamma correction using the lookup table + cv::LUT(image, lookup, image); +} + +// Compute a gamma value that pushes the image mean toward mid-gray +double computeAutoGammaFromMean(const cv::Mat& image) { + // Convert the image to grayscale if it's not already + cv::Mat grayImage; + if (image.channels() == 3) { + cv::cvtColor(image, grayImage, cv::COLOR_BGR2GRAY); + } else { + grayImage = image; + } + + // Calculate the mean intensity + cv::Scalar meanIntensity = mean(grayImage); + + // The ideal mean intensity is 128 (midpoint for 8-bit grayscale images) + double idealMean = 128.0; + double currentMean = meanIntensity[0]; + + // Automatically set gamma value based on the mean intensity + double gamma; + if (currentMean > 0) { + gamma = log(idealMean / 255.0) / log(currentMean / 255.0); + } else { + gamma = 1.0; // Default gamma if the image has no intensity + } + + // Ensure gamma is within a reasonable range (e.g., between 0.1 and 3.0) + gamma = std::max(0.1, std::min(gamma, 3.0)); + + return gamma; +} + +// Auto-choose a gamma so dark images get lifted and bright images get toned +// down (expects mono8) +// - It sets the mean intensity to 255/2 ≃ 128 +// - The correction weight makes all the values weaker(<1) or stronger(>1) +void apply_auto_gamma(cv::Mat& image, double correction_weight) { + double gamma = computeAutoGammaFromMean(image) * correction_weight; + applyGammaLUT(image, gamma); +} + +// Convert BGR image to single-channel grayscale using custom B,G,R weights +// weights = (b, g, r), e.g. (0.114f, 0.587f, 0.299f) +void to_weighted_gray(const cv::Mat& bgr, + cv::Mat& gray, + double wB, + double wG, + double wR) { + cv::Matx13f customWeights(wB, wG, wR); + cv::transform(bgr, gray, customWeights); +} + +// Returns the Otsu threshold value chosen by OpenCV (0..255) and outputs the +// thresholded binary image +int apply_otsu(const cv::Mat& gray8u, + cv::Mat& out, + bool invert, + double maxval) { + CV_Assert(gray8u.type() == CV_8UC1 && + "applyOtsu expects 8-bit single-channel input"); + + int ttype = invert ? 
(cv::THRESH_BINARY_INV | cv::THRESH_OTSU) + : (cv::THRESH_BINARY | cv::THRESH_OTSU); + + double thresh = + cv::threshold(gray8u, out, /*thresh ignored*/ 0.0, maxval, ttype); + return static_cast(std::round(thresh)); +} + +// Basic erosion +void apply_erosion(const cv::Mat& src, cv::Mat& filtered, int size, int shape) { + cv::Mat kernel = cv::getStructuringElement( + shape, cv::Size(2 * size + 1, 2 * size + 1), cv::Point(size, size)); + cv::erode(src, filtered, kernel); +} + +// Basic dilation +void apply_dilation(const cv::Mat& src, cv::Mat& dst, int size, int shape) { + cv::Mat kernel = cv::getStructuringElement( + shape, cv::Size(2 * size + 1, 2 * size + 1), cv::Point(size, size)); + cv::dilate(src, dst, kernel); +} + +// Median filter that preserves original depth if it's unsupported by +// cv::medianBlur. Supported depths: CV_8U, CV_16U, CV_32F For others (e.g., +// CV_16S, CV_32S, CV_64F) we convert to CV_32F, filter, then convert back. +void apply_median(const cv::Mat& original, cv::Mat& filtered, int kernel_size) { + CV_Assert(!original.empty()); + + // If caller passed 1, just copy + if (kernel_size == 1) { + original.copyTo(filtered); + return; + } + + // Sanitize kernel size: must be odd and >= 3 + if (kernel_size < 3) + kernel_size = 3; + if ((kernel_size & 1) == 0) + ++kernel_size; + + const int depth = original.depth(); + const bool supported = + (depth == CV_8U || depth == CV_16U || depth == CV_32F); + + const cv::Mat* src = &original; + cv::Mat work, out; + + if (!supported) { + // Convert unsupported depths to CV_32F to avoid clipping (better than + // going to 8U). + original.convertTo(work, CV_32F); + src = &work; + } + + // Do the median blur (OpenCV supports multi-channel for these depths) + cv::medianBlur(*src, out, kernel_size); + + // Convert back to original depth if we promoted + if (!supported) { + out.convertTo(filtered, depth); + } else { + filtered = std::move(out); + } +} + +// Apply a fixed binary threshold. +// - Accepts grayscale or color input (auto-converts to gray). +// - Ensures 8-bit depth for thresholding. +// - Returns a 0/255 mask (CV_8U). +// - Set `invert=true` to get white background & black foreground. +void apply_fixed_threshold(const cv::Mat& img, + cv::Mat& filtered, + int thresh, + bool invert) { + if (img.empty()) { + throw std::invalid_argument( + "applyFixedThreshold: input image is empty"); + } + if (thresh < 0 || thresh > 255) { + throw std::out_of_range( + "applyFixedThreshold: thresh must be in [0, 255]"); + } + + // Convert to grayscale + cv::Mat gray; + if (img.channels() == 3 || img.channels() == 4) { + cv::cvtColor(img, gray, cv::COLOR_BGR2GRAY); + } else { + gray = img; + } + + // Ensure 8-bit + if (gray.depth() != CV_8U) { + cv::Mat tmp; + cv::normalize(gray, tmp, 0, 255, cv::NORM_MINMAX); + tmp.convertTo(gray, CV_8U); + } + + // Threshold + int type = invert ? (cv::THRESH_BINARY_INV) : (cv::THRESH_BINARY); + cv::threshold(gray, filtered, thresh, 255, type); +} + +// Takes a strict binary obstacle mask (0/255) and returns a float image +// where each pixel is the distance (in pixels) to the nearest obstacle. +// Requirements: +// - binObstacles: single-channel CV_8U with values in {0, 255} only. +// - obstaclesAreWhite: +// true -> obstacles = 255, free = 0 +// false -> obstacles = 0, free = 255 +// Output: +// - dist: CV_32F distances to nearest obstacle (0 at obstacle pixels). +// Params: +// - type: CV_DIST_L1, CV_DIST_L2, CV_DIST_C, ... 
+// - maskSize: 3, 5, or CV_DIST_MASK_PRECISE (0) +// Best regards, ChatGPT +void distance_field(const cv::Mat& binObstacles, + cv::Mat& dist, + bool obstaclesAreWhite, + int type, + int maskSize) { + if (binObstacles.empty()) + throw std::invalid_argument("distance_field_binary: input is empty"); + + if (binObstacles.channels() != 1) + throw std::invalid_argument( + "distance_field_binary: input must be single-channel"); + + if (binObstacles.depth() != CV_8U) + throw std::invalid_argument( + "distance_field_binary: input must be CV_8U (0/255)"); + + // Validate strict binary: values must be only 0 or 255 + { + cv::Mat notZero = (binObstacles != 0); + cv::Mat not255 = (binObstacles != 255); + cv::Mat invalid = + notZero & not255; // pixels that are neither 0 nor 255 + if (cv::countNonZero(invalid) > 0) + throw std::invalid_argument( + "distance_field_binary: input must contain only 0 or 255 " + "values"); + } + + // OpenCV distanceTransform computes distance TO the nearest ZERO pixel + // for NON-ZERO regions. We want distance FROM obstacles. + // Build freeMask (255 = free, 0 = obstacles). + cv::Mat freeMask; + if (obstaclesAreWhite) { + // obstacles=255 -> free=0, invert to get free=255 + freeMask = 255 - binObstacles; + } else { + // obstacles=0, free=255 already correct + freeMask = binObstacles; + } + + if (!(maskSize == 3 || maskSize == 5 || maskSize == cv::DIST_MASK_PRECISE)) + maskSize = 3; + + cv::distanceTransform(freeMask, dist, type, maskSize); // dist is CV_32F + + const float cap = 100.f; // pixel + cv::Mat clipped; + cv::min(dist, cap, clipped); + clipped.convertTo(dist, CV_8U, 255.0f / cap); + // publish vis8u as "mono8" +} + +// TODO(New filter): If you need a helper function define it here like this + +void apply_example(const cv::Mat& original, + cv::Mat& filtered, + std::string example_string, + int example_int) { + filtered = original.clone(); + + // Two centered lines: number above string + const std::string line1 = std::to_string(example_int); + const std::string line2 = example_string; + + // Text style + const int fontFace = cv::FONT_HERSHEY_SIMPLEX; + const double fontScale = 1.0; + const int thickness = 2; + const int lineType = + cv::LINE_AA; // smoother; use cv::LINE_8 for hard edges + const int lineGapPx = 10; // vertical gap between lines in pixels + + // Measure both lines + int base1 = 0, base2 = 0; + const cv::Size sz1 = + cv::getTextSize(line1, fontFace, fontScale, thickness, &base1); + const cv::Size sz2 = + cv::getTextSize(line2, fontFace, fontScale, thickness, &base2); + + // Total block size (approx). Heights don't include baseline, so we add + // baselines for safety. 
+ const int blockW = std::max(sz1.width, sz2.width); + const int blockH = (sz1.height + base1) + lineGapPx + (sz2.height + base2); + + // Top-left of the text block so the whole block is centered + const int blockX = (filtered.cols - blockW) / 2; + const int blockY = (filtered.rows - blockH) / 2; + + // Baseline positions for each line (y is baseline in putText) + const int x1 = blockX + (blockW - sz1.width) / 2; + const int y1 = blockY + sz1.height; // baseline ~ top + height + + const int x2 = blockX + (blockW - sz2.width) / 2; + const int y2 = y1 + base1 + lineGapPx + sz2.height; + + // Clamp to keep text inside image if needed + auto clamp = [](int v, int lo, int hi) { + return std::max(lo, std::min(v, hi)); + }; + + const int x1c = clamp(x1, 0, std::max(0, filtered.cols - sz1.width)); + const int y1c = + clamp(y1, sz1.height, std::max(sz1.height, filtered.rows - base1)); + + const int x2c = clamp(x2, 0, std::max(0, filtered.cols - sz2.width)); + const int y2c = + clamp(y2, sz2.height, std::max(sz2.height, filtered.rows - base2)); + + // Draw white text on mono8 + cv::putText(filtered, line1, cv::Point(x1c, y1c), fontFace, fontScale, + cv::Scalar(255), thickness, lineType); + + cv::putText(filtered, line2, cv::Point(x2c, y2c), fontFace, fontScale, + cv::Scalar(255), thickness, lineType); +} + +/* + _.-. + / 66\ + ( `\ Hi, how you doing :) + |\\ , ,| + __ | \\____/ + ,.--"`-.". / `---' + _.-' '-/ | + _.-" | '-. |_/_ + ,__.-' _,.--\ \ (( /-\ + ',_..--' `\ \ \\_ / + `-, ) |\' + | |-.,,-" ( + | | `\ `',_ + ) \ \,(\(\-' + \ `-,_ + \_(\-(\`-` + " " +*/