
Commit 7eb946c

Merge branch 'feat/openpose_python' into fix/publish_save_openface
2 parents 660eaba + a7c9a3a commit 7eb946c

13 files changed (+396, -172 lines)
image_recognition_openpose/CMakeLists.txt

Lines changed: 8 additions & 66 deletions

```diff
@@ -1,72 +1,14 @@
 cmake_minimum_required(VERSION 2.8.3)
 project(image_recognition_openpose)
 
-## Add support for C++11, supported in ROS Kinetic and newer
-add_definitions(-std=c++11)
+find_package(catkin REQUIRED)
 
-find_package(catkin REQUIRED COMPONENTS
-  roscpp
-  cv_bridge
-  sensor_msgs
-  image_recognition_msgs
-  diagnostic_updater
-)
-
-catkin_package(
-  CATKIN_DEPENDS roscpp cv_bridge sensor_msgs image_recognition_msgs diagnostic_updater
-)
-
-set(OPENPOSE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/openpose)
-IF(EXISTS ${OPENPOSE_DIR})
-  message("Using openpose from " ${OPENPOSE_DIR} " ...")
-
-  set(CAFFE_DIR ${OPENPOSE_DIR}/3rdparty/caffe)
-
-  set(CAFFE_INCLUDE_DIRS ${CAFFE_DIR}/distribute/include)
-  set(CAFFE_LINK_LIBRARY ${CAFFE_DIR}/distribute/lib/libcaffe.so)
-
-  set(OPENPOSE_INCLUDE_DIRS ${OPENPOSE_DIR}/include openpose/src /usr/local/cuda/include)
-  set(OPENPOSE_LINK_LIBRARY ${OPENPOSE_DIR}/build/lib/libopenpose.so)
-
-  add_definitions(-DUSE_CAFFE)
-
-  include_directories(
-    ${catkin_INCLUDE_DIRS}
-    ${CAFFE_INCLUDE_DIRS}
-    ${OPENPOSE_INCLUDE_DIRS}
-  )
+catkin_python_setup()
 
-  add_library(openpose_wrapper src/openpose_wrapper.cpp)
+catkin_package()
 
-  target_link_libraries(openpose_wrapper
-    ${CAFFE_LINK_LIBRARY}
-    ${OPENPOSE_LINK_LIBRARY}
-    ${catkin_LIBRARIES}
-  )
-
-  add_executable(${PROJECT_NAME}_test test/test.cpp)
-  target_link_libraries(${PROJECT_NAME}_test openpose_wrapper)
-else()
-  message(WARNING "Openpose folder not found in " ${OPENPOSE_DIR} " using a mock instead ...")
-
-  include_directories(${catkin_INCLUDE_DIRS})
-  add_library(openpose_wrapper src/openpose_wrapper_mock.cpp)
-
-  target_link_libraries(openpose_wrapper ${catkin_LIBRARIES})
-endif()
-
-# depend on generated messages
-add_dependencies(openpose_wrapper ${catkin_EXPORTED_TARGETS})
-
-add_executable(${PROJECT_NAME}_node src/openpose_ros_node.cpp)
-
-# Specify libraries to link a library or executable target against
-target_link_libraries(${PROJECT_NAME}_node openpose_wrapper)
-
-install(TARGETS
-  ${PROJECT_NAME}_node
-  openpose_wrapper
-  ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
-  LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
-  RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
-)
+install(PROGRAMS
+  scripts/detect_poses
+  scripts/openpose_node
+  DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
+)
```
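The switch to `catkin_python_setup()` means the package now needs a `setup.py` at its root so catkin can install the `image_recognition_openpose` Python module that the scripts import. A minimal sketch of what that file typically looks like, assuming the module lives under `src/` (the layout is not shown in this commit):

```python
# setup.py -- minimal sketch of the distutils setup consumed by catkin_python_setup().
# The package name and the src/ layout are assumptions, based on the
# `from image_recognition_openpose import OpenposeWrapper` import in scripts/detect_poses.
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup

d = generate_distutils_setup(
    packages=['image_recognition_openpose'],
    package_dir={'': 'src'},
)

setup(**d)
```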

image_recognition_openpose/README.md

Lines changed: 63 additions & 27 deletions

````diff
@@ -2,49 +2,85 @@
 
 ROS Wrapper for openpose https://github.com/CMU-Perceptual-Computing-Lab/openpose
 
-## Description
-Provides a service interface for openpose. Returns the skeleton when an image is send
+## Installation notes
 
-## Installation
-ROS Kinetic uses OpenCV 3.2 as default. Therefore it is important to compile openpose and caffe against OpenCV 3.2 as well
+This ROS wrapper makes use of the [Openpose python interface](https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/doc/modules/python_module.md).
+Please follow the [installation manual](https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/doc/installation.md) and ensure that the `BUILD_PYTHON` flag is turned on while running CMake.
 
-#### A simple example using the opencv 3.2 version distributed by ROS kinetic:
-```
-sudo apt remove opencv* libopencv*
-sudo apt install ros-kinetic-opencv3
-
-sudo ln -fs /opt/ros/kinetic/lib/libopencv_core3.so /usr/lib/libopencv_core.so
-sudo ln -fs /opt/ros/kinetic/lib/libopencv_highgui3.so /usr/lib/libopencv_highgui.so
-sudo ln -fs /opt/ros/kinetic/lib/libopencv_imgcodecs3.so /usr/lib/libopencv_imgcodecs.so
-sudo ln -fs /opt/ros/kinetic/lib/libopencv_imgproc3.so /usr/lib/libopencv_imgproc.so
-sudo ln -fs /opt/ros/kinetic/lib/libopencv_videoio3.so /usr/lib/libopencv_videoio.so
-sudo ln -fs /opt/ros/kinetic/lib/libopencv_objdetect3.so /usr/lib/libopencv_objdetect.so
-sudo ln -fs /opt/ros/kinetic/include/opencv-3.2.0-dev/opencv2 /usr/include/opencv2
-```
+## Scripts
 
-Next compile openpose using the [openpose installation manual](https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/doc/installation.md)
+### detect_poses
 
-Make sure at the end a symbolic link is added to the ROS package, for example if the openpose folder is in your home dir:
-```
-roscd image_recognition_openpose
-ln -s ~/openpose
+Example for the following picture:
+
+![Example](doc/example.jpg)
+
+```bash
+export MODEL_FOLDER=~/dev/openpose/models
+rosrun image_recognition_openpose detect_poses $MODEL_FOLDER image `rospack find image_recognition_openpose`/doc/example.jpg
 ```
 
-If the symbolic link is not present a mock node will be used for testing.
+Output:
+
+![Example result](doc/example_result.jpg)
+
+It also works with a webcam stream, usage:
+
+```bash
+usage: detect_poses [-h] [--pose_model POSE_MODEL]
+                    [--net_input_size NET_INPUT_SIZE]
+                    [--net_output_size NET_OUTPUT_SIZE]
+                    [--num_scales NUM_SCALES] [--scale_gap SCALE_GAP]
+                    [--num_gpu_start NUM_GPU_START]
+                    [--overlay_alpha OVERLAY_ALPHA]
+                    [--python_path PYTHON_PATH]
+                    model_folder {image,cam} ...
+
+Detect poses in an image
+
+positional arguments:
+  model_folder          Path where the models are stored
+  {image,cam}           Mode
+    image               Use image mode
+    cam                 Use cam mode
+
+optional arguments:
+  -h, --help            show this help message and exit
+  --pose_model POSE_MODEL
+                        What pose model to use (default: BODY_25)
+  --net_input_size NET_INPUT_SIZE
+                        Net input size (default: -1x368)
+  --net_output_size NET_OUTPUT_SIZE
+                        Net output size (default: -1x-1)
+  --num_scales NUM_SCALES
+                        Num scales (default: 1)
+  --scale_gap SCALE_GAP
+                        Scale gap (default: 0.3)
+  --num_gpu_start NUM_GPU_START
+                        What GPU support (default: 0)
+  --overlay_alpha OVERLAY_ALPHA
+                        Overlay alpha for the output image (default: 0.6)
+  --python_path PYTHON_PATH
+                        Python path where Openpose is stored (default:
+                        /usr/local/python/)
+```
 
-(After creating the symlink, do not forget to clean first)
+### openpose_node
 
 ## How-to
 
 Run the image_recognition_openpose node in one terminal, e.g.:
 
-    rosrun image_recognition_openpose image_recognition_openpose_node _net_input_width:=368 _net_input_height:=368 _net_output_width:=368 _net_output_height:=368 _model_folder:=/home/ubuntu/openpose/models/
+```bash
+export MODEL_FOLDER=~/dev/openpose/models
+rosrun image_recognition_openpose openpose_node _model_folder:=$MODEL_FOLDER
+```
 
 Next step is starting the image_recognition_Rqt test gui (https://github.com/tue-robotics/image_recognition_rqt)
 
     rosrun image_recognition_rqt test_gui
-
-Again configure the service you want to call with the gear-wheel in the top-right corner of the screen. If everything is set-up, draw a rectangle in the image and ask the service for detections:
+
+Configure the service you want to call with the gear-wheel in the top-right corner of the screen. If everything is set-up, draw a rectangle in the image and ask the service for detections:
 
 ![Test](doc/openpose.png)
 
````

image_recognition_openpose/doc/example.jpg (binary image added, 99.2 KB)

image_recognition_openpose/doc/example_result.jpg (binary image added, 63.2 KB)
image_recognition_openpose/package.xml

Lines changed: 6 additions & 7 deletions

```diff
@@ -2,18 +2,17 @@
 <package format="2">
   <name>image_recognition_openpose</name>
   <version>0.0.0</version>
-  <description>The image_recognition_openpose package</description>
+  <description>ROS Wrapper for the openpose software package. Exposes a service and topic interface.</description>
 
   <maintainer email="[email protected]">Rein Appeldoorn</maintainer>
 
   <license>MIT</license>
 
   <buildtool_depend>catkin</buildtool_depend>
 
-  <depend>cv_bridge</depend>
-  <depend>diagnostic_updater</depend>
-  <depend>image_recognition_msgs</depend>
-  <depend>libopencv-dev</depend>
-  <depend>roscpp</depend>
-  <depend>sensor_msgs</depend>
+  <exec_depend>cv_bridge</exec_depend>
+  <exec_depend>diagnostic_updater</exec_depend>
+  <exec_depend>image_recognition_msgs</exec_depend>
+  <exec_depend>rospy</exec_depend>
+  <exec_depend>sensor_msgs</exec_depend>
 </package>
```
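The new description mentions a service and topic interface. A hedged sketch of calling the pose-detection service from Python follows; the service name `recognize`, the `image_recognition_msgs/Recognize` type, and its field names are assumptions based on the other image_recognition wrappers, so check `rosservice list` against a running `openpose_node` first:

```python
#!/usr/bin/env python
# Sketch of a service client for the openpose_node.
# Service name, type and field names are assumptions (see the note above).
import cv2
import rospy
from cv_bridge import CvBridge
from image_recognition_msgs.srv import Recognize

rospy.init_node('openpose_client')
rospy.wait_for_service('recognize')
recognize = rospy.ServiceProxy('recognize', Recognize)

bridge = CvBridge()
image = cv2.imread('example.jpg')  # any BGR image on disk

# Convert the OpenCV image to a sensor_msgs/Image and call the service
response = recognize(image=bridge.cv2_to_imgmsg(image, 'bgr8'))
print(response.recognitions)
```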
image_recognition_openpose/scripts/detect_poses

Lines changed: 56 additions & 0 deletions

```diff
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+
+import argparse
+import logging
+import sys
+
+import cv2
+
+from image_recognition_openpose import OpenposeWrapper
+
+parser = argparse.ArgumentParser(description='Detect poses in an image',
+                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+parser.add_argument('model_folder', help='Path where the models are stored')
+parser.add_argument('--pose_model', help='What pose model to use', default="BODY_25")
+parser.add_argument('--net_input_size', help='Net input size', default="-1x368")
+parser.add_argument('--net_output_size', help='Net output size', default="-1x-1")
+parser.add_argument('--num_scales', type=int, help='Num scales', default=1)
+parser.add_argument('--scale_gap', type=float, help='Scale gap', default=0.3)
+parser.add_argument('--num_gpu_start', type=int, help='What GPU support', default=0)
+parser.add_argument('--overlay_alpha', type=float, help='Overlay alpha for the output image', default=0.6)
+parser.add_argument('--python_path', help='Python path where Openpose is stored', default='/usr/local/python/')
+
+mode_parser = parser.add_subparsers(help='Mode')
+image_parser = mode_parser.add_parser('image', help='Use image mode')
+image_parser.set_defaults(mode='image')
+cam_parser = mode_parser.add_parser('cam', help='Use cam mode')
+cam_parser.set_defaults(mode='cam')
+
+# Image specific arguments
+image_parser.add_argument('image', help='Input image')
+
+args = parser.parse_args()
+
+logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
+
+wrapper = OpenposeWrapper(args.model_folder, args.pose_model, args.net_input_size, args.net_output_size,
+                          args.num_scales, args.scale_gap, args.num_gpu_start, args.overlay_alpha, args.python_path)
+
+if args.mode == 'image':
+    # Read the image
+    image = cv2.imread(args.image)
+    recognitions, overlayed_image = wrapper.detect_poses(image)
+
+    logging.info(recognitions)
+    cv2.imshow("overlayed_image", overlayed_image)
+
+    cv2.waitKey()
+elif args.mode == 'cam':
+    cap = cv2.VideoCapture(0)
+    while True:
+        ret, img = cap.read()
+        recognitions, overlayed_image = wrapper.detect_poses(img)
+        cv2.imshow("overlayed_image", overlayed_image)
+
+        if cv2.waitKey(1) & 0xFF == ord('q'):
+            break
```
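The script hands `--python_path` (default `/usr/local/python/`) to the wrapper so it can locate the OpenPose Python bindings built with `BUILD_PYTHON`. A quick sanity check that those bindings are importable; the `pyopenpose` module name matches recent OpenPose releases and is an assumption here, older builds exposed the bindings differently:

```python
# Sanity check for the OpenPose Python bindings -- a sketch, not part of this commit.
# The install prefix and module name are assumptions (see the note above).
import sys

sys.path.append('/usr/local/python')  # default --python_path used by the wrapper

from openpose import pyopenpose as op  # ImportError here means BUILD_PYTHON was off
print(op.__file__)
```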