Skip to content

Commit 06a18c8

Browse files
Feature/refactoring yolo (#4)
2 parents 22da61d + 8e69019 commit 06a18c8

File tree

11 files changed

+400
-281
lines changed

11 files changed

+400
-281
lines changed

.github/workflows/main.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
name: CI
22

3-
on: [push, pull_request]
3+
on: [push, pull_request, workflow_dispatch]
44

55
jobs:
66
tue-ci:

.gitignore

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
build/
2+
images/*
3+
onnxruntime*/
4+
onnxruntime/*
5+
docker/*
6+
CMakefile
7+
CMakeCache.txt
8+
CMakeFiles/*
9+
cmake_install.cmake
10+
Makefile
11+
SPEED-SAM-C-TENSORRT/
12+
sam_inference/model/FastSAM-x.onnx
13+
mask*
14+
segmentation_results*
15+
16+
# Models
17+
*.onnx

CMakeLists.txt

Lines changed: 11 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
1-
cmake_minimum_required(VERSION 3.5)
1+
cmake_minimum_required(VERSION 3.14)
2+
23
project(yolo_onnx_ros)
34

45
add_compile_options(-Wall -Werror=all)
@@ -7,22 +8,21 @@ add_compile_options(-Wextra -Werror=extra)
78
# -------------- Support C++17 for using filesystem ------------------#
89
set(CMAKE_CXX_STANDARD 17)
910
set(CMAKE_CXX_STANDARD_REQUIRED ON)
10-
set(CMAKE_CXX_EXTENSIONS ON)
11-
#set(CMAKE_INCLUDE_CURRENT_DIR ON)
1211

13-
# -------------- OpenCV ------------------#
14-
find_package(OpenCV REQUIRED)
15-
include_directories(${OpenCV_INCLUDE_DIRS})
1612

13+
find_package(OpenCV REQUIRED)
1714
find_package(catkin REQUIRED
1815
COMPONENTS
1916
onnxruntime_ros
2017
)
2118

2219

2320
set(${PROJECT_NAME}_CUDA_ENABLED ${onnxruntime_ros_CUDA_ENABLED})
24-
if(onnxruntime_ros_CUDA_ENABLED)
21+
if(${PROJECT_NAME}_CUDA_ENABLED)
2522
find_package(CUDAToolkit REQUIRED)
23+
set(${PROJECT_NAME}_CUDA_CATKIN_DEPENDS "CUDAToolkit")
24+
set(${PROJECT_NAME}_CUDA_INCLUDE_DIRS "${CUDAToolkit_INCLUDE_DIRS}")
25+
set(${PROJECT_NAME}_CUDA_TARGET_LINK_LIBRARIES "CUDA::cudart")
2626
endif()
2727

2828
configure_file(include/${PROJECT_NAME}/config.hpp.in ${CATKIN_DEVEL_PREFIX}/${CATKIN_GLOBAL_INCLUDE_DESTINATION}/${PROJECT_NAME}/config.hpp)
@@ -38,10 +38,9 @@ catkin_package(
3838
INCLUDE_DIRS include ${CATKIN_DEVEL_PREFIX}/${CATKIN_GLOBAL_INCLUDE_DESTINATION}
3939
LIBRARIES ${PROJECT_NAME}
4040
CATKIN_DEPENDS
41-
DEPENDS CUDAToolkit OpenCV
41+
DEPENDS ${${PROJECT_NAME}_CUDA_CATKIN_DEPENDS} OpenCV
4242
)
4343

44-
4544
# ------------------------------------------------------------------------------------------------
4645
# BUILD
4746
# ------------------------------------------------------------------------------------------------
@@ -50,33 +49,23 @@ include_directories(
5049
include
5150
${CATKIN_DEVEL_PREFIX}/${CATKIN_GLOBAL_INCLUDE_DESTINATION}
5251
SYSTEM
53-
${CUDAToolkit_INCLUDE_DIRS}
52+
${${PROJECT_NAME}_CUDA_INCLUDE_DIRS}
5453
${OpenCV_INCLUDE_DIRS}
5554
${catkin_INCLUDE_DIRS}
5655
)
5756

58-
# -------------- ONNXRuntime ------------------#
59-
6057
add_library(${PROJECT_NAME}
58+
src/detection.cpp
6159
src/yolo_inference.cpp
6260
)
61+
target_link_libraries(${PROJECT_NAME} ${${PROJECT_NAME}_CUDA_TARGET_LINK_LIBRARIES} ${OpenCV_LIBRARIES} ${catkin_LIBRARIES})
6362
# add_dependencies(${PROJECT_NAME} generate_config_hpp)
64-
target_link_libraries(${PROJECT_NAME} CUDA::toolkit ${OpenCV_LIBRARIES} ${catkin_LIBRARIES})
6563

6664
add_executable(test_${PROJECT_NAME}
6765
src/main.cpp
6866
)
6967
target_link_libraries(test_${PROJECT_NAME} ${PROJECT_NAME} ${OpenCV_LIBRARIES} ${catkin_LIBRARIES})
7068

71-
# For Windows system, copy onnxruntime.dll to the same folder of the executable file
72-
if (WIN32)
73-
add_custom_command(TARGET ${PROJECT_NAME} POST_BUILD
74-
COMMAND ${CMAKE_COMMAND} -E copy_if_different
75-
"${ONNXRUNTIME_ROOT}/lib/onnxruntime.dll"
76-
$<TARGET_FILE_DIR:${PROJECT_NAME}>)
77-
endif ()
78-
79-
8069
# ------------------------------------------------------------------------------------------------
8170
# INSTALL
8271
# ------------------------------------------------------------------------------------------------
@@ -104,15 +93,3 @@ install(
10493
${PROJECT_NAME}
10594
DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
10695
)
107-
108-
# Download https://raw.githubusercontent.com/ultralytics/ultralytics/main/ultralytics/cfg/datasets/coco.yaml
109-
# and put it in the same folder of the executable file
110-
configure_file(data/coco.yaml ${CMAKE_CURRENT_BINARY_DIR}/coco.yaml COPYONLY)
111-
112-
# Copy yolov8n.onnx file to the same folder of the executable file
113-
# configure_file(model/yolo11m.onnx ${CMAKE_CURRENT_BINARY_DIR}/yolo11m.onnx COPYONLY)
114-
115-
# # Create folder name images in the same folder of the executable file
116-
# add_custom_command(TARGET ${PROJECT_NAME} POST_BUILD
117-
# COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/images
118-
# )

README.md

Lines changed: 14 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,9 @@
22

33
<img alt="C++" src="https://img.shields.io/badge/C++-17-blue.svg?style=flat&logo=c%2B%2B"> <img alt="Onnx-runtime" src="https://img.shields.io/badge/OnnxRuntime-717272.svg?logo=Onnx&logoColor=white">
44

5-
This example demonstrates how to perform inference using YOLOv8 in C++ with ONNX Runtime and OpenCV's API.
5+
6+
7+
This algorithm is inspired by the [Ultralytics](https://github.com/ultralytics/ultralytics/tree/main/examples/YOLOv8-ONNXRuntime-CPP) implementation to perform inference using YOLOv8 (we also support v11) in C++ with ONNX Runtime and OpenCV's API.
68

79
## Benefits ✨
810

@@ -57,12 +59,12 @@ In order to run example, you also need to download coco.yaml. You can download t
5759
| OpenCV | >=4.0.0 |
5860
| C++ Standard | >=17 |
5961
| Cmake | >=3.5 |
60-
| Cuda (Optional) | >=11.4 \<12.0 |
61-
| cuDNN (Cuda required) | =8 |
62+
| Cuda (Optional) | =12.8 |
63+
| cuDNN (Cuda required) | =9 |
6264

6365
Note: The dependency on C++17 is due to the usage of the C++17 filesystem feature.
6466

65-
Note (2): Due to ONNX Runtime, we need to use CUDA 11 and cuDNN 8. Keep in mind that this requirement might change in the future.
67+
Note (2): Due to ONNX Runtime, we need to use CUDA 12.8 and cuDNN 9. Keep in mind that this requirement might change in the future.
6668

6769
## Build 🛠️
6870

@@ -87,12 +89,8 @@ Note (2): Due to ONNX Runtime, we need to use CUDA 11 and cuDNN 8. Keep in mind
8789
If you encounter an error indicating that the `ONNXRUNTIME_ROOT` variable is not set correctly, you can resolve this by building the project using the appropriate command tailored to your system.
8890

8991
```console
90-
# compiled in a win32 system
91-
cmake -D WIN32=TRUE ..
9292
# compiled in a linux system
9393
cmake -D LINUX=TRUE ..
94-
# compiled in an apple system
95-
cmake -D APPLE=TRUE ..
9694
```
9795

9896
5. Build the project:
@@ -104,9 +102,15 @@ Note (2): Due to ONNX Runtime, we need to use CUDA 11 and cuDNN 8. Keep in mind
104102
6. The built executable should now be located in the `build` directory.
105103

106104
## Usage 🚀
107-
105+
To run from main, just run the executable.
106+
To run the detector in your C++ application:
108107
```c++
109-
//change your param as you like
108+
//To run the detector, add this to your C++ application:
109+
std::vector<DL_RESULT> results;
110+
std::unique_ptr<YOLO_V8> yoloDetector = Initialize();
111+
results = DetectObjects(yoloDetector, img);
112+
113+
//You can change your parameters as you like (inside the Initialize() function)
110114
//Pay attention to your device and the onnx model type(fp32 or fp16)
111115
DL_INIT_PARAM params;
112116
params.rectConfidenceThreshold = 0.1;
Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,8 @@
11
#ifndef YOLO_ONNX_ROS_CONFIG_HPP_
22
#define YOLO_ONNX_ROS_CONFIG_HPP_
33

4-
//Set which version of the Tree Interface to use
5-
#define YOLO_ONNX_ROS_CUDA_ENABLED @onnx_yolo_ros_CUDA_ENABLED@
4+
#define YOLO_ONNX_FALSE 0
5+
#define YOLO_ONNX_TRUE 1
6+
#define YOLO_ONNX_ROS_CUDA_ENABLED YOLO_ONNX_@yolo_onnx_ros_CUDA_ENABLED@
67

78
#endif //#define YOLO_ONNX_ROS_CONFIG_HPP_
Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
#include "yolo_onnx_ros/yolo_inference.hpp"
2+
3+
std::tuple<std::unique_ptr<YOLO_V8>, DL_INIT_PARAM> Initialize();
4+
std::vector<DL_RESULT> Detector(std::unique_ptr<YOLO_V8>& p, const cv::Mat& img);
5+
int ReadCocoYaml(std::unique_ptr<YOLO_V8>& p);

include/yolo_onnx_ros/yolo_inference.h renamed to include/yolo_onnx_ros/yolo_inference.hpp

Lines changed: 10 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1,22 +1,16 @@
11
#pragma once
22

3-
#define RET_OK nullptr
4-
5-
#ifdef _WIN32
6-
#include <Windows.h>
7-
#include <direct.h>
8-
#include <io.h>
9-
#endif
3+
#define RET_OK nullptr
104

115
#include <string>
126
#include <vector>
137
#include <cstdio>
148
#include <opencv2/opencv.hpp>
15-
#include <onnxruntime_cxx_api.h>
9+
#include "onnxruntime_cxx_api.h"
1610

1711
#include <yolo_onnx_ros/config.hpp>
1812

19-
#ifdef YOLO_ONNX_ROS_CUDA_ENABLED
13+
#if defined(YOLO_ONNX_ROS_CUDA_ENABLED) && YOLO_ONNX_ROS_CUDA_ENABLED
2014
#include <cuda_fp16.h>
2115
#endif
2216

@@ -61,14 +55,16 @@ typedef struct _DL_RESULT
6155
class YOLO_V8
6256
{
6357
public:
64-
YOLO_V8();
58+
YOLO_V8() = default;
6559

66-
~YOLO_V8();
60+
~YOLO_V8() = default;
6761

6862
public:
6963
const char* CreateSession(DL_INIT_PARAM& iParams);
7064

7165
const char* RunSession(const cv::Mat& iImg, std::vector<DL_RESULT>& oResult);
66+
// imgSize is [width, height]
67+
char* PreProcess(const cv::Mat& iImg, const std::vector<int>& iImgSize, cv::Mat& oImg);
7268

7369
std::vector<std::string> classes{};
7470

@@ -77,15 +73,14 @@ class YOLO_V8
7773

7874
// Note: The logic is on the .cpp file since its a private method.
7975
template<typename N>
80-
char* TensorProcess(clock_t& starttime_1, N& blob, std::vector<int64_t>& inputNodeDims,
81-
std::vector<DL_RESULT>& oResult);
76+
char* TensorProcess(clock_t& starttime_1, const cv::Mat& iImg, N& blob, std::vector<int64_t>& inputNodeDims,
77+
std::vector<DL_RESULT>& oResult);
8278

83-
char* PreProcess(const cv::Mat& iImg, std::vector<int> iImgSize, cv::Mat& oImg);
8479

8580
Ort::Env env_;
8681
std::unique_ptr<Ort::Session> session_;
8782
bool cudaEnable_;
88-
Ort::RunOptions options_;
83+
Ort::RunOptions options;
8984
std::vector<const char*> inputNodeNames_;
9085
std::vector<const char*> outputNodeNames_;
9186

0 commit comments

Comments
 (0)