Commit a35e7f4

adjust demo_ci with fluid_inference_install_dir
test=develop

1 parent fc63aa7

File tree

7 files changed: 18 additions, 12 deletions

  paddle/fluid/inference/api/demo_ci/CMakeLists.txt
  paddle/fluid/inference/api/demo_ci/run.sh
  paddle/fluid/inference/api/demo_ci/simple_on_word2vec.cc
  paddle/fluid/inference/api/demo_ci/trt_mobilenet_demo.cc
  paddle/fluid/inference/api/demo_ci/utils.h
  paddle/fluid/inference/api/demo_ci/vis_demo.cc
  paddle/scripts/paddle_build.sh

paddle/fluid/inference/api/demo_ci/CMakeLists.txt

Lines changed: 3 additions & 3 deletions

@@ -77,7 +77,7 @@ endif(NOT WIN32)
 link_directories("${PADDLE_LIB}/third_party/install/protobuf/lib")
 link_directories("${PADDLE_LIB}/third_party/install/glog/lib")
 link_directories("${PADDLE_LIB}/third_party/install/gflags/lib")
-link_directories("${PADDLE_LIB}/paddle/fluid/inference")
+link_directories("${PADDLE_LIB}/paddle/lib")

 add_executable(${DEMO_NAME} ${DEMO_NAME}.cc)

@@ -97,10 +97,10 @@ endif()
 # Note: libpaddle_inference_api.so/a must put before libpaddle_fluid.so/a
 if(WITH_STATIC_LIB)
   set(DEPS
-      ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
+      ${PADDLE_LIB}/paddle/lib/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
 else()
   set(DEPS
-      ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid${CMAKE_SHARED_LIBRARY_SUFFIX})
+      ${PADDLE_LIB}/paddle/lib/libpaddle_fluid${CMAKE_SHARED_LIBRARY_SUFFIX})
 endif()

 if (NOT WIN32)
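Both hunks above retarget the demo build from the in-tree layout to the packaged install tree. A rough sketch of that tree, inferred only from the paths touched in this commit (an assumption; actual contents vary with build options):

    # fluid_inference_install_dir/        <- produced by `make inference_lib_dist`
    # ├── paddle/
    # │   ├── include/paddle_inference_api.h
    # │   └── lib/libpaddle_fluid.a, libpaddle_fluid.so
    # └── third_party/install/{protobuf,glog,gflags,mklml}/lib/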

paddle/fluid/inference/api/demo_ci/run.sh

Lines changed: 5 additions & 4 deletions

@@ -5,12 +5,13 @@ TEST_GPU_CPU=$3 # test both GPU/CPU mode or only CPU mode
 DATA_DIR=$4 # dataset
 TENSORRT_INCLUDE_DIR=$5 # TensorRT header file dir, default to /usr/local/TensorRT/include
 TENSORRT_LIB_DIR=$6 # TensorRT lib file dir, default to /usr/local/TensorRT/lib
+inference_install_dir=${PADDLE_ROOT}/build/fluid_inference_install_dir

 cd `dirname $0`
 current_dir=`pwd`
 if [ $2 == ON ]; then
   # You can export yourself if move the install path
-  MKL_LIB=${PADDLE_ROOT}/build/fluid_install_dir/third_party/install/mklml/lib
+  MKL_LIB=${inference_install_dir}/third_party/install/mklml/lib
   export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${MKL_LIB}
 fi
 if [ $3 == ON ]; then

@@ -55,7 +56,7 @@ cd build
 for WITH_STATIC_LIB in ON OFF; do
   # -----simple_on_word2vec-----
   rm -rf *
-  cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
+  cmake .. -DPADDLE_LIB=${inference_install_dir} \
     -DWITH_MKL=$TURN_ON_MKL \
     -DDEMO_NAME=simple_on_word2vec \
     -DWITH_GPU=$TEST_GPU_CPU \

@@ -75,7 +76,7 @@ for WITH_STATIC_LIB in ON OFF; do
   fi
   # ---------vis_demo---------
   rm -rf *
-  cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
+  cmake .. -DPADDLE_LIB=${inference_install_dir} \
    -DWITH_MKL=$TURN_ON_MKL \
    -DDEMO_NAME=vis_demo \
    -DWITH_GPU=$TEST_GPU_CPU \

@@ -98,7 +99,7 @@ for WITH_STATIC_LIB in ON OFF; do
   # --------tensorrt mobilenet------
   if [ $USE_TENSORRT == ON -a $TEST_GPU_CPU == ON ]; then
     rm -rf *
-    cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
+    cmake .. -DPADDLE_LIB=${inference_install_dir} \
      -DWITH_MKL=$TURN_ON_MKL \
      -DDEMO_NAME=trt_mobilenet_demo \
      -DWITH_GPU=$TEST_GPU_CPU \
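For reference, run.sh takes six positional arguments. A hypothetical invocation matching the defaults that paddle_build.sh passes (all paths here are placeholders, not values from this diff):

    # $1=PADDLE_ROOT $2=WITH_MKL $3=TEST_GPU_CPU $4=DATA_DIR
    # $5=TENSORRT_INCLUDE_DIR $6=TENSORRT_LIB_DIR (last two only matter when the TensorRT demo runs)
    ./run.sh /path/to/Paddle ON OFF /path/to/demo_data \
        /usr/local/TensorRT/include /usr/local/TensorRT/lib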

paddle/fluid/inference/api/demo_ci/simple_on_word2vec.cc

Lines changed: 1 addition & 1 deletion

@@ -23,7 +23,7 @@ limitations under the License. */
 #include <memory>
 #include <thread> //NOLINT

-#include "paddle/fluid/inference/paddle_inference_api.h"
+#include "paddle/include/paddle_inference_api.h"

 DEFINE_string(dirname, "", "Directory of the inference model.");
 DEFINE_bool(use_gpu, false, "Whether use gpu.");

paddle/fluid/inference/api/demo_ci/trt_mobilenet_demo.cc

Lines changed: 1 addition & 1 deletion

@@ -18,7 +18,7 @@ limitations under the License. */

 #include <gflags/gflags.h>
 #include <glog/logging.h> // use glog instead of CHECK to avoid importing other paddle header files.
-#include "paddle/fluid/inference/demo_ci/utils.h"
+#include "utils.h"  // NOLINT

 DECLARE_double(fraction_of_gpu_memory_to_use);
 DEFINE_string(modeldir, "", "Directory of the inference model.");

paddle/fluid/inference/api/demo_ci/utils.h

Lines changed: 1 addition & 1 deletion

@@ -18,7 +18,7 @@
 #include <iostream>
 #include <string>
 #include <vector>
-#include "paddle/fluid/inference/paddle_inference_api.h"
+#include "paddle/include/paddle_inference_api.h"

 namespace paddle {
 namespace demo {

paddle/fluid/inference/api/demo_ci/vis_demo.cc

Lines changed: 1 addition & 1 deletion

@@ -18,7 +18,7 @@ limitations under the License. */

 #include <gflags/gflags.h>
 #include <glog/logging.h> // use glog instead of CHECK to avoid importing other paddle header files.
-#include "paddle/fluid/inference/demo_ci/utils.h"
+#include "utils.h"  // NOLINT

 #ifdef PADDLE_WITH_CUDA
 DECLARE_double(fraction_of_gpu_memory_to_use);
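The four include rewrites above all follow the packaged layout: paddle_inference_api.h now sits under paddle/include/ in the install tree, while utils.h is picked up from the demo directory itself. A hypothetical manual compile line showing how the two include styles resolve (a sketch only; the demo's CMakeLists adds glog/gflags/protobuf include and link flags that are omitted here):

    INSTALL_DIR=${PADDLE_ROOT}/build/fluid_inference_install_dir
    # -I${INSTALL_DIR} resolves "paddle/include/paddle_inference_api.h";
    # -I. resolves the local "utils.h".
    g++ -std=c++11 vis_demo.cc -I${INSTALL_DIR} -I. \
        -L${INSTALL_DIR}/paddle/lib -lpaddle_fluid -o vis_demo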

paddle/scripts/paddle_build.sh

Lines changed: 6 additions & 1 deletion

@@ -659,6 +659,7 @@ function gen_fluid_lib() {
 EOF
     cmake .. -DWITH_DISTRIBUTE=OFF
     make -j `nproc` fluid_lib_dist
+    make -j `nproc` inference_lib_dist
   fi
 }

@@ -672,6 +673,8 @@ EOF
     cd ${PADDLE_ROOT}/build
     cp -r fluid_install_dir fluid
     tar -czf fluid.tgz fluid
+    cp -r fluid_inference_install_dir fluid_inference
+    tar -czf fluid_inference.tgz fluid_inference
   fi
 }

@@ -683,7 +686,9 @@ function test_fluid_lib() {
 ========================================
 EOF
     cd ${PADDLE_ROOT}/paddle/fluid/inference/api/demo_ci
-    ./run.sh ${PADDLE_ROOT} ${WITH_MKL:-ON} ${WITH_GPU:-OFF} ${INFERENCE_DEMO_INSTALL_DIR} ${TENSORRT_INCLUDE_DIR:-/usr/local/TensorRT/include} ${TENSORRT_LIB_DIR:-/usr/local/TensorRT/lib}
+    ./run.sh ${PADDLE_ROOT} ${WITH_MKL:-ON} ${WITH_GPU:-OFF} ${INFERENCE_DEMO_INSTALL_DIR} \
+        ${TENSORRT_INCLUDE_DIR:-/usr/local/TensorRT/include} \
+        ${TENSORRT_LIB_DIR:-/usr/local/TensorRT/lib}
     ./clean.sh
   fi
 }
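Condensed, the CI flow after this commit builds and packages both install trees. A sketch using only the target and directory names visible in this diff (other cmake flags omitted):

    cd ${PADDLE_ROOT}/build && cmake .. -DWITH_DISTRIBUTE=OFF
    make -j `nproc` fluid_lib_dist        # emits build/fluid_install_dir, packaged as fluid.tgz
    make -j `nproc` inference_lib_dist    # emits build/fluid_inference_install_dir, packaged as fluid_inference.tgz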
