Skip to content

Commit 5d5587f

Browse files
authored
Merge pull request #13792 from NHZlX/trt_dy_lib
add trt api lib to paddle_fluid lib
2 parents fd0dd07 + 9445502 commit 5d5587f

File tree

5 files changed

+85
-22
lines changed

5 files changed

+85
-22
lines changed

paddle/fluid/inference/CMakeLists.txt

Lines changed: 13 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -19,20 +19,27 @@ cc_library(paddle_fluid_origin DEPS ${fluid_modules} paddle_fluid_api)
1919

2020
add_subdirectory(api)
2121

22+
set(STATIC_INFERENCE_APIS paddle_fluid_api paddle_inference_api analysis_predictor)
23+
set(SHARED_INFERENCE_SRCS
24+
io.cc ${CMAKE_CURRENT_SOURCE_DIR}/api/api.cc ${CMAKE_CURRENT_SOURCE_DIR}/api/api_impl.cc
25+
${CMAKE_CURRENT_SOURCE_DIR}/api/analysis_predictor.cc
26+
${CMAKE_CURRENT_SOURCE_DIR}/api/details/zero_copy_tensor.cc)
27+
if (WITH_GPU AND TENSORRT_FOUND)
28+
set(STATIC_INFERENCE_APIS ${STATIC_INFERENCE_APIS} paddle_inference_tensorrt_subgraph_engine)
29+
set(SHARED_INFERENCE_SRCS ${SHARED_INFERENCE_SRCS} ${CMAKE_CURRENT_SOURCE_DIR}/api/api_tensorrt_subgraph_engine.cc)
30+
endif()
31+
2232
# Create static library
23-
cc_library(paddle_fluid DEPS ${fluid_modules} paddle_fluid_api paddle_inference_api
24-
analysis_predictor zero_copy_tensor)
33+
cc_library(paddle_fluid DEPS ${fluid_modules} ${STATIC_INFERENCE_APIS} zero_copy_tensor)
34+
2535
if(NOT APPLE)
2636
# TODO(liuyiqu): Temporarily disable the link flag because it is not supported on Mac.
2737
set(LINK_FLAGS "-Wl,--retain-symbols-file ${CMAKE_CURRENT_SOURCE_DIR}/paddle_fluid.sym")
2838
set_target_properties(paddle_fluid PROPERTIES LINK_FLAGS "${LINK_FLAGS}")
2939
endif()
3040

3141
# Create shared library
32-
cc_library(paddle_fluid_shared SHARED
33-
SRCS io.cc ${CMAKE_CURRENT_SOURCE_DIR}/api/api.cc ${CMAKE_CURRENT_SOURCE_DIR}/api/api_impl.cc
34-
${CMAKE_CURRENT_SOURCE_DIR}/api/analysis_predictor.cc
35-
${CMAKE_CURRENT_SOURCE_DIR}/api/details/zero_copy_tensor.cc
42+
cc_library(paddle_fluid_shared SHARED SRCS ${SHARED_INFERENCE_SRCS}
3643
DEPS ${fluid_modules} paddle_fluid_api)
3744

3845
set_target_properties(paddle_fluid_shared PROPERTIES OUTPUT_NAME paddle_fluid)

paddle/fluid/inference/api/demo_ci/CMakeLists.txt

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@ project(cpp_inference_demo CXX C)
33
option(WITH_MKL "Compile demo with MKL/OpenBlas support, default use MKL." ON)
44
option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." OFF)
55
option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static." ON)
6+
option(USE_TENSORRT "Compile demo with TensorRT." OFF)
67

78
macro(safe_set_static_flag)
89
foreach(flag_var
@@ -60,6 +61,13 @@ endif(NOT WIN32)
6061
include_directories("${PADDLE_LIB}/third_party/boost")
6162
include_directories("${PADDLE_LIB}/third_party/eigen3")
6263

64+
if (NOT WIN32)
65+
if (USE_TENSORRT AND WITH_GPU)
66+
include_directories("${TENSORRT_INCLUDE_DIR}")
67+
link_directories("${TENSORRT_LIB_DIR}")
68+
endif()
69+
endif(NOT WIN32)
70+
6371
if (NOT WIN32)
6472
link_directories("${PADDLE_LIB}/third_party/install/snappy/lib")
6573
link_directories("${PADDLE_LIB}/third_party/install/snappystream/lib")
@@ -112,6 +120,10 @@ endif(NOT WIN32)
112120

113121
if(WITH_GPU)
114122
if(NOT WIN32)
123+
if (USE_TENSORRT)
124+
set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer${CMAKE_STATIC_LIBRARY_SUFFIX})
125+
set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer_plugin${CMAKE_STATIC_LIBRARY_SUFFIX})
126+
endif()
115127
set(DEPS ${DEPS} ${CUDA_LIB}/libcudart${CMAKE_SHARED_LIBRARY_SUFFIX})
116128
else()
117129
set(DEPS ${DEPS} ${CUDA_LIB}/cudart${CMAKE_STATIC_LIBRARY_SUFFIX} )

paddle/fluid/inference/api/demo_ci/run.sh

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,9 @@ PADDLE_ROOT=$1
33
TURN_ON_MKL=$2 # use MKL or Openblas
44
TEST_GPU_CPU=$3 # test both GPU/CPU mode or only CPU mode
55
DATA_DIR=$4 # dataset
6+
TENSORRT_INCLUDE_DIR=$5 # TensorRT header file dir, default to /usr/local/TensorRT/include
7+
TENSORRT_LIB_DIR=$6 # TensorRT lib file dir, default to /usr/local/TensorRT/lib
8+
69
cd `dirname $0`
710
current_dir=`pwd`
811
if [ $2 == ON ]; then
@@ -16,6 +19,11 @@ else
1619
use_gpu_list='false'
1720
fi
1821

22+
USE_TENSORRT=OFF
23+
if [ -d "$TENSORRT_INCLUDE_DIR" -a -d "$TENSORRT_LIB_DIR" ]; then
24+
USE_TENSORRT=ON
25+
fi
26+
1927
PREFIX=inference-vis-demos%2F
2028
URL_ROOT=http://paddlemodels.cdn.bcebos.com/${PREFIX}
2129

@@ -86,5 +94,25 @@ for WITH_STATIC_LIB in ON OFF; do
8694
fi
8795
done
8896
done
97+
98+
# --------tensorrt mobilenet------
99+
if [ $USE_TENSORRT == ON -a $TEST_GPU_CPU == ON ]; then
100+
rm -rf *
101+
cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
102+
-DWITH_MKL=$TURN_ON_MKL \
103+
-DDEMO_NAME=vis_demo \
104+
-DWITH_GPU=$TEST_GPU_CPU \
105+
-DWITH_STATIC_LIB=$WITH_STATIC_LIB \
106+
-DUSE_TENSORRT=$USE_TENSORRT \
107+
-DTENSORRT_INCLUDE_DIR=$TENSORRT_INCLUDE_DIR \
108+
-DTENSORRT_LIB_DIR=$TENSORRT_LIB_DIR
109+
make -j
110+
./vis_demo \
111+
--modeldir=$DATA_DIR/mobilenet/model \
112+
--data=$DATA_DIR/mobilenet/data.txt \
113+
--refer=$DATA_DIR/mobilenet/result.txt \
114+
--use_gpu=true \
115+
--use_trt=true
116+
fi
89117
done
90118
set +x

paddle/fluid/inference/api/demo_ci/vis_demo.cc

Lines changed: 31 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,7 @@ DEFINE_string(
3434
"path of data; each line is a record, format is "
3535
"'<space splitted floats as data>\t<space splitted ints as shape'");
3636
DEFINE_bool(use_gpu, false, "Whether use gpu.");
37+
DEFINE_bool(use_trt, false, "Whether use trt.");
3738

3839
namespace paddle {
3940
namespace demo {
@@ -100,20 +101,32 @@ void CheckOutput(const std::string& referfile, const PaddleTensor& output) {
100101
/*
101102
* Use the native fluid engine to inference the demo.
102103
*/
103-
void Main(bool use_gpu) {
104-
NativeConfig config;
105-
config.param_file = FLAGS_modeldir + "/__params__";
106-
config.prog_file = FLAGS_modeldir + "/__model__";
107-
config.use_gpu = use_gpu;
108-
config.device = 0;
109-
if (FLAGS_use_gpu) {
104+
void Main(bool use_gpu, bool use_trt) {
105+
std::unique_ptr<PaddlePredictor> predictor;
106+
if (!use_trt) {
107+
NativeConfig config;
108+
config.param_file = FLAGS_modeldir + "/__params__";
109+
config.prog_file = FLAGS_modeldir + "/__model__";
110+
config.use_gpu = use_gpu;
111+
config.device = 0;
112+
if (FLAGS_use_gpu) {
113+
config.fraction_of_gpu_memory = 0.1; // set by yourself
114+
}
115+
116+
VLOG(3) << "init predictor";
117+
predictor =
118+
CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
119+
} else {
120+
paddle::contrib::MixedRTConfig config;
121+
config.param_file = FLAGS_modeldir + "/__params__";
122+
config.prog_file = FLAGS_modeldir + "/__model__";
123+
config.use_gpu = true;
124+
config.device = 0;
125+
config.max_batch_size = 1;
110126
config.fraction_of_gpu_memory = 0.1; // set by yourself
127+
predictor = CreatePaddlePredictor<paddle::contrib::MixedRTConfig>(config);
111128
}
112129

113-
VLOG(3) << "init predictor";
114-
auto predictor =
115-
CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
116-
117130
VLOG(3) << "begin to process data";
118131
// Just a single batch of data.
119132
std::string line;
@@ -131,7 +144,7 @@ void Main(bool use_gpu) {
131144

132145
VLOG(3) << "run executor";
133146
std::vector<PaddleTensor> output;
134-
predictor->Run({input}, &output);
147+
predictor->Run({input}, &output, 1);
135148

136149
VLOG(3) << "output.size " << output.size();
137150
auto& tensor = output.front();
@@ -146,9 +159,12 @@ void Main(bool use_gpu) {
146159

147160
int main(int argc, char** argv) {
148161
google::ParseCommandLineFlags(&argc, &argv, true);
149-
paddle::demo::Main(false /* use_gpu*/);
150-
if (FLAGS_use_gpu) {
151-
paddle::demo::Main(true /*use_gpu*/);
162+
if (FLAGS_use_gpu && FLAGS_use_trt) {
163+
paddle::demo::Main(true /*use_gpu*/, true);
164+
} else if (FLAGS_use_gpu) {
165+
paddle::demo::Main(true /*use_gpu*/, false);
166+
} else {
167+
paddle::demo::Main(false /*use_gpu*/, false /*use_tensorrt*/);
152168
}
153169
return 0;
154170
}

paddle/scripts/paddle_build.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -683,7 +683,7 @@ function test_fluid_inference_lib() {
683683
========================================
684684
EOF
685685
cd ${PADDLE_ROOT}/paddle/fluid/inference/api/demo_ci
686-
./run.sh ${PADDLE_ROOT} ${WITH_MKL:-ON} ${WITH_GPU:-OFF} ${INFERENCE_DEMO_INSTALL_DIR}
686+
./run.sh ${PADDLE_ROOT} ${WITH_MKL:-ON} ${WITH_GPU:-OFF} ${INFERENCE_DEMO_INSTALL_DIR} ${TENSORRT_INCLUDE_DIR:-/usr/local/TensorRT/include} ${TENSORRT_LIB_DIR:-/usr/local/TensorRT/lib}
687687
./clean.sh
688688
fi
689689
}

0 commit comments

Comments
 (0)