Skip to content

Commit a7c1ed1

Browse files
authored
Merge pull request #12115 from luotao1/demo
add independent demo for testing the static fluid library
2 parents 7040c67 + a752236 commit a7c1ed1

File tree

4 files changed

+29
-29
lines changed

4 files changed

+29
-29
lines changed

paddle/contrib/inference/demo_ci/CMakeLists.txt

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -52,14 +52,12 @@ else()
5252
set(MATH_LIB ${PADDLE_LIB}/third_party/install/openblas/lib/libopenblas.a)
5353
endif()
5454

55+
# Note: libpaddle_inference_api.so/a must put before libpaddle_fluid.so/a
5556
if(WITH_STATIC_LIB)
5657
set(DEPS
57-
"-Wl,--whole-archive"
58-
${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.a
59-
"-Wl,--no-whole-archive"
60-
${PADDLE_LIB}/contrib/inference/libpaddle_inference_api.a)
58+
${PADDLE_LIB}/contrib/inference/libpaddle_inference_api.a
59+
${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.a)
6160
else()
62-
# Note: libpaddle_inference_api.so must put before libpaddle_fluid.so
6361
set(DEPS
6462
${PADDLE_LIB}/contrib/inference/libpaddle_inference_api.so
6563
${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.so)
Lines changed: 16 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,13 @@
11
set -x
22
PADDLE_ROOT=$1
3-
WITH_MKL=$2
4-
WITH_GPU=$3
5-
if [ $3 == "ON" ]; then
3+
TURN_ON_MKL=$2 # use MKL or Openblas
4+
TEST_GPU_CPU=$3 # test both GPU/CPU mode or only CPU mode
5+
if [ $2 == ON ]; then
6+
# You can export yourself if move the install path
7+
MKL_LIB=${PADDLE_ROOT}/build/fluid_install_dir/third_party/install/mklml/lib
8+
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${MKL_LIB}
9+
fi
10+
if [ $3 == ON ]; then
611
use_gpu_list='true false'
712
else
813
use_gpu_list='false'
@@ -11,24 +16,22 @@ fi
1116
mkdir -p build
1217
cd build
1318

14-
for WITH_STATIC_LIB in false; do
19+
for WITH_STATIC_LIB in ON OFF; do
1520
rm -rf *
1621
cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
17-
-DWITH_MKL=$WITH_MKL \
22+
-DWITH_MKL=$TURN_ON_MKL \
1823
-DDEMO_NAME=simple_on_word2vec \
19-
-DWITH_GPU=$WITH_GPU \
24+
-DWITH_GPU=$TEST_GPU_CPU \
2025
-DWITH_STATIC_LIB=$WITH_STATIC_LIB
21-
make
26+
make -j
2227
for use_gpu in $use_gpu_list; do
2328
./simple_on_word2vec \
2429
--dirname=${PADDLE_ROOT}/build/python/paddle/fluid/tests/book/word2vec.inference.model \
2530
--use_gpu=$use_gpu
31+
if [ $? -ne 0 ]; then
32+
echo "inference demo runs fail."
33+
exit 1
34+
fi
2635
done
2736
done
28-
if [ $? -eq 0 ]; then
29-
exit 0
30-
else
31-
echo "inference demo runs fail."
32-
exit 1
33-
fi
3437
set +x

paddle/fluid/inference/CMakeLists.txt

Lines changed: 7 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,10 @@
1+
# analysis and tensorrt must be added before creating static library,
2+
# otherwise, there would be undefined reference to them in static library.
3+
add_subdirectory(analysis)
4+
if (TENSORRT_FOUND)
5+
add_subdirectory(tensorrt)
6+
endif()
7+
18
set(FLUID_CORE_MODULES proto_desc memory lod_tensor executor )
29

310
# TODO(panyx0718): Should this be called paddle_fluid_inference_api_internal?
@@ -7,10 +14,6 @@ cc_library(paddle_fluid_api
714

815
get_property(fluid_modules GLOBAL PROPERTY FLUID_MODULES)
916

10-
if(WITH_CONTRIB)
11-
set(fluid_modules "${fluid_modules}" paddle_inference_api)
12-
endif()
13-
1417
# Create static library
1518
cc_library(paddle_fluid DEPS ${fluid_modules} paddle_fluid_api)
1619
if(NOT APPLE)
@@ -35,9 +38,3 @@ if(WITH_TESTING)
3538
# both tests/book and analysis depends the models that generated by python/paddle/fluid/tests/book
3639
add_subdirectory(tests/book)
3740
endif()
38-
39-
add_subdirectory(analysis)
40-
41-
if (TENSORRT_FOUND)
42-
add_subdirectory(tensorrt)
43-
endif()

paddle/scripts/paddle_build.sh

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -516,6 +516,7 @@ function gen_fluid_inference_lib() {
516516
Deploying fluid inference library ...
517517
========================================
518518
EOF
519+
cmake .. -DWITH_DISTRIBUTE=OFF
519520
make -j `nproc` inference_lib_dist
520521
cd ${PADDLE_ROOT}/build
521522
cp -r fluid_install_dir fluid
@@ -531,7 +532,7 @@ function test_fluid_inference_lib() {
531532
========================================
532533
EOF
533534
cd ${PADDLE_ROOT}/paddle/contrib/inference/demo_ci
534-
sh run.sh ${PADDLE_ROOT} ${WITH_MKL:-ON} ${WITH_GPU:-OFF}
535+
./run.sh ${PADDLE_ROOT} ${WITH_MKL:-ON} ${WITH_GPU:-OFF}
535536
fi
536537
}
537538

@@ -577,6 +578,7 @@ function main() {
577578
fluid_inference_lib)
578579
cmake_gen ${PYTHON_ABI:-""}
579580
gen_fluid_inference_lib
581+
test_fluid_inference_lib
580582
;;
581583
check_style)
582584
check_style

0 commit comments

Comments
 (0)