Commit 687b8ea

feat(Pose): Add YOLO Pose Estimation inference module with C++ and Python bindings, include example (#58)
close #58
1 parent 7922d89 commit 687b8ea

File tree

18 files changed: +1143 −93 lines

examples/pose/CMakeLists.txt

Lines changed: 86 additions & 0 deletions
@@ -0,0 +1,86 @@
# Project setup
cmake_minimum_required(VERSION 3.15.0)
cmake_policy(SET CMP0091 NEW)
cmake_policy(SET CMP0146 OLD)
project(pose LANGUAGES CXX CUDA)
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)

# Set the C++ standard
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

# Add dependencies
find_package(CUDA REQUIRED)
set(CMAKE_CUDA_ARCHITECTURES native)
set(CUDA_PATH ${CUDA_TOOLKIT_ROOT_DIR})

find_package(OpenCV REQUIRED)

# Add build options
option(TENSORRT_PATH "TensorRT Path. Example: /usr/local/tensorrt" "")
if(NOT DEFINED TENSORRT_PATH)
    message(FATAL_ERROR "TensorRT path is not set. Please specify the TensorRT path.")
endif()

option(DEPLOY_PATH "TensorRT-YOLO Project Path." "")
if(NOT DEFINED DEPLOY_PATH)
    message(FATAL_ERROR "TensorRT-YOLO project path is not set. Please specify the TensorRT-YOLO project path.")
endif()

# Add the executable
add_executable(pose "")

# Include directories
target_include_directories(pose PRIVATE
    ${CUDA_INCLUDE_DIRS}
    ${TENSORRT_PATH}/include
    ${OpenCV_INCLUDE_DIRS}
    ${DEPLOY_PATH}/include
)

# Link library directories
target_link_directories(pose PRIVATE
    ${TENSORRT_PATH}/lib
    ${DEPLOY_PATH}/lib
)

# Private source files
target_sources(pose PRIVATE pose.cpp)

# Private compile definitions
target_compile_definitions(pose PRIVATE ${CUDA_DEFINITIONS})

# Private link libraries
target_link_libraries(pose PRIVATE
    ${CUDA_cudart_LIBRARY}
    ${OpenCV_LIBS}
    deploy
)

# Select the TensorRT libraries according to the platform
if(MSVC AND EXISTS ${TENSORRT_PATH}/lib/nvinfer_10.dll)
    target_link_libraries(pose PRIVATE nvinfer_10 nvinfer_plugin_10 nvonnxparser_10)
else()
    target_link_libraries(pose PRIVATE nvinfer nvinfer_plugin nvonnxparser)
endif()

# Set the output directory
set_target_properties(pose PROPERTIES
    OUTPUT_NAME "pose"
    RUNTIME_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}/bin"
)

# Compiler options
if(MSVC)
    target_compile_options(pose PRIVATE
        $<$<CONFIG:Release>:-O2>
    )
    set_property(TARGET pose PROPERTY MSVC_RUNTIME_LIBRARY "MultiThreaded$<$<CONFIG:Debug>:Debug>")
else()
    target_compile_options(pose PRIVATE
        $<$<COMPILE_LANGUAGE:CXX>:-O3 -flto=auto>
    )
    target_link_options(pose PRIVATE
        $<$<COMPILE_LANGUAGE:CXX>:-O3 -flto=auto>
    )
endif()
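
For reference, a minimal out-of-source configure-and-build using this file might look like the following sketch; the TensorRT and TensorRT-YOLO locations are placeholders that must point at your own installations.

```bash
# Configure and build from examples/pose (placeholder paths)
cmake -S . -B build \
      -DTENSORRT_PATH="/path/to/your/TensorRT" \
      -DDEPLOY_PATH="/path/to/your/TensorRT-YOLO"
# Release build; the resulting binary is written to examples/pose/bin/pose
cmake --build build -j8 --config Release
```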

examples/pose/README.en.md

Lines changed: 97 additions & 0 deletions
@@ -0,0 +1,97 @@
[简体中文](README.md) | English

# Pose Estimation Inference Example

This example uses the yolo11n-pose model to demonstrate how to perform pose estimation inference using the Command Line Interface (CLI), Python, and C++.

[yolo11n-pose.pt](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-pose.pt) [Test Images: COCO-Pose-part.zip](https://www.ilanzou.com/s/kBby4w1D)

Please download the required `yolo11n-pose.pt` model file and test images from the links above, save the model file to the `models` folder, and unzip the test images into the `images` folder.

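The expected layout can be prepared with a few shell commands; this is only a sketch — the model URL comes from the link above, while the test-image archive sits behind a share link and has to be downloaded manually in a browser.

```bash
# Run from examples/pose (a sketch; adjust paths to your setup)
mkdir -p models images
# Fetch the model weights from the Ultralytics release linked above
wget -P models https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-pose.pt
# COCO-Pose-part.zip must be downloaded manually from the share link above,
# then unpacked into the images folder
unzip COCO-Pose-part.zip -d images
```
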
## Model Export

> [!IMPORTANT]
>
> If you only want to use the CLI tool `trtyolo` provided by `tensorrt_yolo` to export an ONNX model (with TensorRT plugins) that this project can run inference on, you can install it from [PyPI](https://pypi.org/project/tensorrt-yolo) by simply executing the following command:
>
> ```bash
> pip install -U tensorrt_yolo
> ```
>
> If you want the same inference speed as C++, please refer to [Install-tensorrt_yolo](../../docs/en/build_and_install.md#install-tensorrt_yolo) to build the latest version of `tensorrt_yolo` yourself.

Use the following command to export the model to ONNX format with the [EfficientRotatedNMS](../../plugin/efficientRotatedNMSPlugin/) plugin. For detailed `trtyolo` CLI export options, please read [Model Export](../../docs/en/model_export.md):

```bash
trtyolo export -w models/yolo11n-pose.pt -v yolo11 -o models -s
```

After running the above command, a `yolo11n-pose.onnx` file with a `batch_size` of 1 will be generated in the `models` folder. Next, use the `trtexec` tool to convert the ONNX file into a TensorRT engine (fp16):

```bash
trtexec --onnx=models/yolo11n-pose.onnx --saveEngine=models/yolo11n-pose.engine --fp16 --staticPlugins=/path/to/your/TensorRT-YOLO/lib/plugin/libcustom_plugins.so --setPluginsToSerialize=/path/to/your/TensorRT-YOLO/lib/plugin/libcustom_plugins.so
```

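As an optional sanity check (our suggestion, not part of the original workflow), the serialized engine can be reloaded and timed with `trtexec`; the plugin path below is the same placeholder used in the build command above.

```bash
# Optional: confirm the engine deserializes and runs before wiring it into the example
trtexec --loadEngine=models/yolo11n-pose.engine --staticPlugins=/path/to/your/TensorRT-YOLO/lib/plugin/libcustom_plugins.so
```
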
## Model Inference

> [!IMPORTANT]
>
> The `tensorrt_yolo` package installed via [PyPI](https://pypi.org/project/tensorrt-yolo) only provides the ability to export ONNX models (with TensorRT plugins) for inference in this project; it does not provide inference capabilities.
> If you want the same inference speed as C++, please refer to [Install-tensorrt_yolo](../../docs/en/build_and_install.md#install-tensorrt_yolo) to build the latest version of `tensorrt_yolo` yourself.

### Inference Using CLI

> [!NOTE]
> The `--cudaGraph` option, added in version 4.0, can further accelerate the inference process, but it only supports static models.
>
> Pose estimation inference is supported from version 4.3 onward; the option `-m 3, --mode 3` selects pose estimation.

1. Use the `trtyolo` command-line tool from the `tensorrt_yolo` library for inference. Run the following command to view the help information:

```bash
trtyolo infer --help
```

2. Run the following command for inference:

```bash
trtyolo infer -e models/yolo11n-pose.engine -m 3 -i images -o output -l labels.txt --cudaGraph
```

The inference results will be saved in the `output` folder, and visualized results will be generated.

### Inference Using Python

1. Use the `tensorrt_yolo` library to run the example script `pose.py` for inference.
2. Run the following command for inference:

```bash
python pose.py -e models/yolo11n-pose.engine -i images -o output -l labels.txt --cudaGraph
```

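If your engine was built with dynamic shapes rather than the static `batch_size` of 1 used above, drop the `--cudaGraph` flag, since (as noted earlier) CUDA Graph acceleration only supports static models:

```bash
# Same invocation without CUDA Graph, for dynamic-shape engines
python pose.py -e models/yolo11n-pose.engine -i images -o output -l labels.txt
```
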
### Inference Using C++

1. Ensure that the project has been compiled following the [`TensorRT-YOLO` compilation guide](../../docs/en/build_and_install.md#tensorrt-yolo-compile).
2. Compile `pose.cpp` into an executable:

```bash
# Compile using xmake
xmake f -P . --tensorrt="/path/to/your/TensorRT" --deploy="/path/to/your/TensorRT-YOLO"
xmake -P . -r

# Compile using cmake
mkdir -p build && cd build
cmake -DTENSORRT_PATH="/path/to/your/TensorRT" -DDEPLOY_PATH="/path/to/your/TensorRT-YOLO" ..
cmake --build . -j8 --config Release
```

After compilation, the executable will be generated in the `bin` folder of the project root directory.

3. Run the following command for inference:

```bash
cd bin
./pose -e ../models/yolo11n-pose.engine -i ../images -o ../output -l ../labels.txt --cudaGraph
```

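On Linux, if the binary fails to start because `libnvinfer` or the `deploy` library cannot be found at runtime, the library directories can be added to the loader path first; the paths below are the same placeholders used during compilation.

```bash
# Expose the TensorRT and TensorRT-YOLO libraries to the dynamic loader (placeholder paths)
export LD_LIBRARY_PATH=/path/to/your/TensorRT/lib:/path/to/your/TensorRT-YOLO/lib:$LD_LIBRARY_PATH
```
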
Through the above methods, you can successfully complete model inference.

examples/pose/README.md

Lines changed: 97 additions & 0 deletions
@@ -0,0 +1,97 @@
[English](README.en.md) | Simplified Chinese

# Pose Estimation Inference Example

This example uses the yolo11n-pose model to demonstrate how to perform pose estimation inference via the Command Line Interface (CLI), Python, and C++.

[yolo11n-pose.pt](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-pose.pt) [Test Images: COCO-Pose-part.zip](https://www.ilanzou.com/s/kBby4w1D)

Please download the required `yolo11n-pose.pt` model file and test images from the links above, save the model file to the `models` folder, and unzip the test images into the `images` folder.

## Model Export

> [!IMPORTANT]
>
> If you only want to use the CLI tool `trtyolo` provided by `tensorrt_yolo` to export an ONNX model (with TensorRT plugins) that this project can run inference on, you can install it from [PyPI](https://pypi.org/project/tensorrt-yolo) by simply executing the following command:
>
> ```bash
> pip install -U tensorrt_yolo
> ```
>
> If you want the same inference speed as C++, please refer to [Install tensorrt_yolo](../../docs/cn/build_and_install.md#安装-tensorrt_yolo) and build the latest version of `tensorrt_yolo` yourself.

Use the following command to export the model to ONNX format with the [EfficientRotatedNMS](../../plugin/efficientRotatedNMSPlugin/) plugin. For detailed `trtyolo` CLI export options, please read [Model Export](../../docs/cn/model_export.md):

```bash
trtyolo export -w models/yolo11n-pose.pt -v yolo11 -o models -s
```

After running the above command, a `yolo11n-pose.onnx` file with a `batch_size` of 1 will be generated in the `models` folder. Next, use the `trtexec` tool to convert the ONNX file into a TensorRT engine (fp16):

```bash
trtexec --onnx=models/yolo11n-pose.onnx --saveEngine=models/yolo11n-pose.engine --fp16 --staticPlugins=/path/to/your/TensorRT-YOLO/lib/plugin/libcustom_plugins.so --setPluginsToSerialize=/path/to/your/TensorRT-YOLO/lib/plugin/libcustom_plugins.so
```

## Model Inference

> [!IMPORTANT]
>
> The `tensorrt_yolo` package installed from [PyPI](https://pypi.org/project/tensorrt-yolo) only provides the ability to export ONNX models (with TensorRT plugins) for inference in this project; it does not provide inference capabilities.
> If you want the same inference speed as C++, please refer to [Install tensorrt_yolo](../../docs/cn/build_and_install.md#安装-tensorrt_yolo) and build the latest version of `tensorrt_yolo` yourself.

### Inference Using the CLI

> [!NOTE]
> The `--cudaGraph` option, added in version 4.0, can further accelerate the inference process, but it only supports static models.
>
> Pose estimation inference is supported from version 4.3 onward; the option `-m 3, --mode 3` selects pose estimation.

1. Use the `trtyolo` command-line tool from the `tensorrt_yolo` library for inference. Run the following command to view the help information:

```bash
trtyolo infer --help
```

2. Run the following command for inference:

```bash
trtyolo infer -e models/yolo11n-pose.engine -m 3 -i images -o output -l labels.txt --cudaGraph
```

The inference results will be saved in the `output` folder, and visualized results will be generated.

### Inference Using Python

1. Use the `tensorrt_yolo` library to run the example script `pose.py` for inference.
2. Run the following command for inference:

```bash
python pose.py -e models/yolo11n-pose.engine -i images -o output -l labels.txt --cudaGraph
```

### Inference Using C++

1. Ensure that the project has been compiled following the [`TensorRT-YOLO` compilation guide](../../docs/cn/build_and_install.md#tensorrt-yolo-编译).
2. Compile `pose.cpp` into an executable:

```bash
# Compile using xmake
xmake f -P . --tensorrt="/path/to/your/TensorRT" --deploy="/path/to/your/TensorRT-YOLO"
xmake -P . -r

# Compile using cmake
mkdir -p build && cd build
cmake -DTENSORRT_PATH="/path/to/your/TensorRT" -DDEPLOY_PATH="/path/to/your/TensorRT-YOLO" ..
cmake --build . -j8 --config Release
```

After compilation, the executable will be generated in the `bin` folder of the project root directory.

3. Run the following command for inference:

```bash
cd bin
./pose -e ../models/yolo11n-pose.engine -i ../images -o ../output -l ../labels.txt --cudaGraph
```

With any of the above methods, you can successfully complete model inference.

examples/pose/images/.keep

Whitespace-only changes.

examples/pose/labels.txt

Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
person

examples/pose/models/.keep

Whitespace-only changes.
