
Commit 7d680be

Merge branch 'develop' into mkldnn_test
2 parents: 6a4e923 + a831ecc

21 files changed, +684 -57 lines

CMakeLists.txt (3 additions & 0 deletions)

@@ -127,6 +127,9 @@ set(THIRD_PARTY_PATH "${CMAKE_BINARY_DIR}/third_party" CACHE STRING
 set(FLUID_INSTALL_DIR "${CMAKE_BINARY_DIR}/fluid_install_dir" CACHE STRING
   "A path setting fluid shared and static libraries")
 
+set(FLUID_INFERENCE_INSTALL_DIR "${CMAKE_BINARY_DIR}/fluid_inference_install_dir" CACHE STRING
+  "A path setting fluid inference shared and static libraries")
+
 if (WITH_C_API AND WITH_PYTHON)
   message(WARNING "It is suggest not embedded a python interpreter in Paddle "
     "when using C-API. It will give an unpredictable behavior when using a "

README.md (7 additions & 7 deletions)

@@ -19,7 +19,7 @@ Our vision is to enable deep learning for everyone via PaddlePaddle.
 Please refer to our [release announcement](https://github.com/PaddlePaddle/Paddle/releases) to track the latest feature of PaddlePaddle.
 
 
-### Latest PaddlePaddle Release: [Fluid 0.15.0](https://github.com/PaddlePaddle/Paddle/tree/v0.15.0)
+### Latest PaddlePaddle Release: [Fluid 1.0.0](https://github.com/PaddlePaddle/Paddle/tree/release/1.0.0)
 ### Install Latest Stable Release:
 ```
 # Linux CPU
@@ -76,26 +76,26 @@ pip install paddlepaddle-gpu==0.15.0.post85
 
 ## Installation
 
-It is recommended to read [this doc](http://paddlepaddle.org/documentation/docs/zh/0.15.0/new_docs/beginners_guide/install/install_doc.html) on our website.
+It is recommended to read [this doc](http://paddlepaddle.org/documentation/docs/zh/1.0/beginners_guide/index.html) on our website.
 
 ## Documentation
 
-We provide [English](http://paddlepaddle.org/documentation/docs/en/0.15.0/getstarted/index_en.html) and
-[Chinese](http://paddlepaddle.org/documentation/docs/zh/0.15.0/new_docs/beginners_guide/index.html) documentation.
+We provide [English](http://paddlepaddle.org/documentation/docs/en/1.0.0/getstarted/index_en.html) and
+[Chinese](http://paddlepaddle.org/documentation/docs/zh/1.0/beginners_guide/index.html) documentation.
 
 - [Deep Learning 101](https://github.com/PaddlePaddle/book)
 
   You might want to start from this online interactive book that can run in a Jupyter Notebook.
 
-- [Distributed Training](http://paddlepaddle.org/documentation/docs/zh/0.15.0/new_docs/user_guides/howto/training/cluster_howto.html)
+- [Distributed Training](http://paddlepaddle.org/documentation/docs/zh/1.0/user_guides/howto/training/cluster_howto.html)
 
   You can run distributed training jobs on MPI clusters.
 
-- [Python API](http://paddlepaddle.org/documentation/api/zh/0.15.0/fluid.html)
+- [Python API](http://paddlepaddle.org/documentation/api/zh/1.0/fluid.html)
 
   Our new API enables much shorter programs.
 
-- [How to Contribute](http://paddlepaddle.org/documentation/docs/zh/0.15.0/new_docs/advanced_usage/development/contribute_to_paddle.html)
+- [How to Contribute](http://paddlepaddle.org/documentation/docs/zh/1.0/advanced_usage/development/contribute_to_paddle.html)
 
   We appreciate your contributions!

cmake/inference_lib.cmake (37 additions & 17 deletions)

@@ -150,16 +150,16 @@ if (WITH_ANAKIN AND WITH_MKL)
     SRCS
     ${PADDLE_BINARY_DIR}/paddle/fluid/inference/api/libinference_anakin_api* # compiled anakin api
     ${ANAKIN_INSTALL_DIR} # anakin release
-    DSTS ${dst_dir}/inference/anakin ${FLUID_INSTALL_DIR}/third_party/install/anakin)
+    DSTS ${FLUID_INSTALL_DIR}/third_party/install/anakin ${FLUID_INSTALL_DIR}/third_party/install/anakin)
   list(APPEND inference_deps anakin_inference_lib)
 endif()
 
 set(module "inference")
 copy(inference_lib DEPS ${inference_deps}
   SRCS ${src_dir}/${module}/*.h ${PADDLE_BINARY_DIR}/paddle/fluid/inference/libpaddle_fluid.*
-  ${src_dir}/${module}/api/paddle_inference_api.h ${src_dir}/${module}/api/demo_ci
+  ${src_dir}/${module}/api/paddle_inference_api.h
   ${PADDLE_BINARY_DIR}/paddle/fluid/inference/api/paddle_inference_pass.h
-  DSTS ${dst_dir}/${module} ${dst_dir}/${module} ${dst_dir}/${module} ${dst_dir}/${module} ${dst_dir}/${module}
+  DSTS ${dst_dir}/${module} ${dst_dir}/${module} ${dst_dir}/${module} ${dst_dir}/${module}
 )
 
 set(module "platform")
@@ -188,18 +188,38 @@ copy(cmake_cache
 # This command generates a complete fluid library for both train and inference
 add_custom_target(fluid_lib_dist DEPENDS ${fluid_lib_dist_dep})
 
+# Following commands generate a inference-only fluid library
+# third_party, version.txt and CMakeCache.txt are the same position with ${FLUID_INSTALL_DIR}
+copy(third_party DEPS fluid_lib_dist
+  SRCS ${FLUID_INSTALL_DIR}/third_party ${FLUID_INSTALL_DIR}/CMakeCache.txt
+  DSTS ${FLUID_INFERENCE_INSTALL_DIR} ${FLUID_INFERENCE_INSTALL_DIR}
+)
+
+# only need libpaddle_fluid.so/a and paddle_inference_api.h for inference-only library
+copy(inference_api_lib DEPS fluid_lib_dist
+  SRCS ${FLUID_INSTALL_DIR}/paddle/fluid/inference/libpaddle_fluid.*
+  ${FLUID_INSTALL_DIR}/paddle/fluid/inference/paddle_inference_api.h
+  DSTS ${FLUID_INFERENCE_INSTALL_DIR}/paddle/lib ${FLUID_INFERENCE_INSTALL_DIR}/paddle/include
+)
+
+add_custom_target(inference_lib_dist DEPENDS third_party inference_api_lib)
+
 # paddle fluid version
-execute_process(
-  COMMAND ${GIT_EXECUTABLE} log --pretty=format:%H -1
-  WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}
-  OUTPUT_VARIABLE PADDLE_GIT_COMMIT)
-set(version_file ${FLUID_INSTALL_DIR}/version.txt)
-file(WRITE ${version_file}
-  "GIT COMMIT ID: ${PADDLE_GIT_COMMIT}\n"
-  "WITH_MKL: ${WITH_MKL}\n"
-  "WITH_GPU: ${WITH_GPU}\n")
-if(WITH_GPU)
-  file(APPEND ${version_file}
-    "CUDA version: ${CUDA_VERSION}\n"
-    "CUDNN version: v${CUDNN_MAJOR_VERSION}\n")
-endif()
+function(version version_file)
+  execute_process(
+    COMMAND ${GIT_EXECUTABLE} log --pretty=format:%H -1
+    WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}
+    OUTPUT_VARIABLE PADDLE_GIT_COMMIT)
+  file(WRITE ${version_file}
+    "GIT COMMIT ID: ${PADDLE_GIT_COMMIT}\n"
+    "WITH_MKL: ${WITH_MKL}\n"
+    "WITH_MKLDNN: ${WITH_MKLDNN}\n"
+    "WITH_GPU: ${WITH_GPU}\n")
+  if(WITH_GPU)
+    file(APPEND ${version_file}
+      "CUDA version: ${CUDA_VERSION}\n"
+      "CUDNN version: v${CUDNN_MAJOR_VERSION}\n")
+  endif()
+endfunction()
+version(${FLUID_INSTALL_DIR}/version.txt)
+version(${FLUID_INFERENCE_INSTALL_DIR}/version.txt)

paddle/fluid/API.spec (1 addition & 0 deletions)

@@ -173,6 +173,7 @@ paddle.fluid.layers.mean ArgSpec(args=['x', 'name'], varargs=None, keywords=None
 paddle.fluid.layers.mul ArgSpec(args=['x', 'y', 'x_num_col_dims', 'y_num_col_dims', 'name'], varargs=None, keywords=None, defaults=(1, 1, None))
 paddle.fluid.layers.sigmoid_cross_entropy_with_logits ArgSpec(args=['x', 'label', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.maxout ArgSpec(args=['x', 'groups', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.affine_channel ArgSpec(args=['x', 'scale', 'bias', 'data_layout', 'name'], varargs=None, keywords=None, defaults=(None, None, 'NCHW', None))
 paddle.fluid.layers.data ArgSpec(args=['name', 'shape', 'append_batch_size', 'dtype', 'lod_level', 'type', 'stop_gradient'], varargs=None, keywords=None, defaults=(True, 'float32', 0, VarType.LOD_TENSOR, True))
 paddle.fluid.layers.open_files ArgSpec(args=['filenames', 'shapes', 'lod_levels', 'dtypes', 'thread_num', 'buffer_size', 'pass_num', 'is_test'], varargs=None, keywords=None, defaults=(None, None, 1, None))
 paddle.fluid.layers.read_file ArgSpec(args=['reader'], varargs=None, keywords=None, defaults=None)

paddle/fluid/inference/api/demo_ci/CMakeLists.txt (3 additions & 3 deletions)

@@ -77,7 +77,7 @@ endif(NOT WIN32)
 link_directories("${PADDLE_LIB}/third_party/install/protobuf/lib")
 link_directories("${PADDLE_LIB}/third_party/install/glog/lib")
 link_directories("${PADDLE_LIB}/third_party/install/gflags/lib")
-link_directories("${PADDLE_LIB}/paddle/fluid/inference")
+link_directories("${PADDLE_LIB}/paddle/lib")
 
 add_executable(${DEMO_NAME} ${DEMO_NAME}.cc)
 
@@ -97,10 +97,10 @@ endif()
 # Note: libpaddle_inference_api.so/a must put before libpaddle_fluid.so/a
 if(WITH_STATIC_LIB)
   set(DEPS
-      ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
+      ${PADDLE_LIB}/paddle/lib/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
 else()
   set(DEPS
-      ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid${CMAKE_SHARED_LIBRARY_SUFFIX})
+      ${PADDLE_LIB}/paddle/lib/libpaddle_fluid${CMAKE_SHARED_LIBRARY_SUFFIX})
 endif()
 
 if (NOT WIN32)

paddle/fluid/inference/api/demo_ci/run.sh (5 additions & 4 deletions)

@@ -5,12 +5,13 @@ TEST_GPU_CPU=$3 # test both GPU/CPU mode or only CPU mode
 DATA_DIR=$4 # dataset
 TENSORRT_INCLUDE_DIR=$5 # TensorRT header file dir, defalut to /usr/local/TensorRT/include
 TENSORRT_LIB_DIR=$6 # TensorRT lib file dir, default to /usr/local/TensorRT/lib
+inference_install_dir=${PADDLE_ROOT}/build/fluid_inference_install_dir
 
 cd `dirname $0`
 current_dir=`pwd`
 if [ $2 == ON ]; then
   # You can export yourself if move the install path
-  MKL_LIB=${PADDLE_ROOT}/build/fluid_install_dir/third_party/install/mklml/lib
+  MKL_LIB=${inference_install_dir}/third_party/install/mklml/lib
   export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${MKL_LIB}
 fi
 if [ $3 == ON ]; then
@@ -55,7 +56,7 @@ cd build
 for WITH_STATIC_LIB in ON OFF; do
   # -----simple_on_word2vec-----
   rm -rf *
-  cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
+  cmake .. -DPADDLE_LIB=${inference_install_dir} \
     -DWITH_MKL=$TURN_ON_MKL \
     -DDEMO_NAME=simple_on_word2vec \
     -DWITH_GPU=$TEST_GPU_CPU \
@@ -75,7 +76,7 @@ for WITH_STATIC_LIB in ON OFF; do
   fi
   # ---------vis_demo---------
   rm -rf *
-  cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
+  cmake .. -DPADDLE_LIB=${inference_install_dir} \
    -DWITH_MKL=$TURN_ON_MKL \
    -DDEMO_NAME=vis_demo \
    -DWITH_GPU=$TEST_GPU_CPU \
@@ -98,7 +99,7 @@ for WITH_STATIC_LIB in ON OFF; do
   # --------tensorrt mobilenet------
   if [ $USE_TENSORRT == ON -a $TEST_GPU_CPU == ON ]; then
     rm -rf *
-    cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
+    cmake .. -DPADDLE_LIB=${inference_install_dir} \
      -DWITH_MKL=$TURN_ON_MKL \
      -DDEMO_NAME=trt_mobilenet_demo \
      -DWITH_GPU=$TEST_GPU_CPU \

paddle/fluid/inference/api/demo_ci/simple_on_word2vec.cc (3 additions & 5 deletions)

@@ -23,7 +23,7 @@ limitations under the License. */
 #include <memory>
 #include <thread>  //NOLINT
 
-#include "paddle/fluid/inference/paddle_inference_api.h"
+#include "paddle/include/paddle_inference_api.h"
 
 DEFINE_string(dirname, "", "Directory of the inference model.");
 DEFINE_bool(use_gpu, false, "Whether use gpu.");
@@ -42,8 +42,7 @@ void Main(bool use_gpu) {
   config.use_gpu = use_gpu;
   config.fraction_of_gpu_memory = 0.15;
   config.device = 0;
-  auto predictor =
-      CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
+  auto predictor = CreatePaddlePredictor<NativeConfig>(config);
 
   for (int batch_id = 0; batch_id < 3; batch_id++) {
     //# 2. Prepare input.
@@ -85,8 +84,7 @@ void MainThreads(int num_threads, bool use_gpu) {
   config.use_gpu = use_gpu;
   config.fraction_of_gpu_memory = 0.15;
   config.device = 0;
-  auto main_predictor =
-      CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
+  auto main_predictor = CreatePaddlePredictor<NativeConfig>(config);
 
   std::vector<std::thread> threads;
   for (int tid = 0; tid < num_threads; ++tid) {
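
For reference, a minimal sketch of the simplified factory call this diff adopts: the engine kind is now deduced from the config type, so the second template argument (PaddleEngineKind::kNative) is dropped. The model path below is a placeholder, not something shipped with this commit.

// Minimal sketch, assuming a model exported to a local directory.
#include "paddle/include/paddle_inference_api.h"

int main() {
  paddle::NativeConfig config;
  config.model_dir = "./word2vec.inference.model";  // hypothetical model path
  config.use_gpu = false;
  // One template parameter now selects the engine via the config type.
  auto predictor = paddle::CreatePaddlePredictor<paddle::NativeConfig>(config);
  return predictor ? 0 : 1;
}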

paddle/fluid/inference/api/demo_ci/trt_mobilenet_demo.cc (1 addition & 1 deletion)

@@ -18,7 +18,7 @@ limitations under the License. */
 
 #include <gflags/gflags.h>
 #include <glog/logging.h>  // use glog instead of CHECK to avoid importing other paddle header files.
-#include "paddle/fluid/inference/demo_ci/utils.h"
+#include "utils.h"  // NOLINT
 
 DECLARE_double(fraction_of_gpu_memory_to_use);
 DEFINE_string(modeldir, "", "Directory of the inference model.");

paddle/fluid/inference/api/demo_ci/utils.h (1 addition & 1 deletion)

@@ -18,7 +18,7 @@
 #include <iostream>
 #include <string>
 #include <vector>
-#include "paddle/fluid/inference/paddle_inference_api.h"
+#include "paddle/include/paddle_inference_api.h"
 
 namespace paddle {
 namespace demo {

paddle/fluid/inference/api/demo_ci/vis_demo.cc (12 additions & 7 deletions)

@@ -18,7 +18,7 @@ limitations under the License. */
 
 #include <gflags/gflags.h>
 #include <glog/logging.h>  // use glog instead of CHECK to avoid importing other paddle header files.
-#include "paddle/fluid/inference/demo_ci/utils.h"
+#include "utils.h"  // NOLINT
 
 #ifdef PADDLE_WITH_CUDA
 DECLARE_double(fraction_of_gpu_memory_to_use);
@@ -34,12 +34,13 @@ DEFINE_bool(use_gpu, false, "Whether use gpu.");
 namespace paddle {
 namespace demo {
 
+using contrib::AnalysisConfig;
 /*
- * Use the native fluid engine to inference the demo.
+ * Use the native and analysis fluid engine to inference the demo.
  */
 void Main(bool use_gpu) {
-  std::unique_ptr<PaddlePredictor> predictor;
-  NativeConfig config;
+  std::unique_ptr<PaddlePredictor> predictor, analysis_predictor;
+  AnalysisConfig config;
   config.param_file = FLAGS_modeldir + "/__params__";
   config.prog_file = FLAGS_modeldir + "/__model__";
   config.use_gpu = use_gpu;
@@ -49,8 +50,8 @@ void Main(bool use_gpu) {
   }
 
   VLOG(3) << "init predictor";
-  predictor =
-      CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
+  predictor = CreatePaddlePredictor<NativeConfig>(config);
+  analysis_predictor = CreatePaddlePredictor<AnalysisConfig>(config);
 
   VLOG(3) << "begin to process data";
   // Just a single batch of data.
@@ -68,7 +69,7 @@ void Main(bool use_gpu) {
   input.dtype = PaddleDType::FLOAT32;
 
   VLOG(3) << "run executor";
-  std::vector<PaddleTensor> output;
+  std::vector<PaddleTensor> output, analysis_output;
   predictor->Run({input}, &output, 1);
 
   VLOG(3) << "output.size " << output.size();
@@ -77,6 +78,10 @@ void Main(bool use_gpu) {
 
   // compare with reference result
   CheckOutput(FLAGS_refer, tensor);
+
+  // the analysis_output has some diff with native_output,
+  // TODO(luotao): add CheckOutput for analysis_output later.
+  analysis_predictor->Run({input}, &analysis_output, 1);
 }
 
 }  // namespace demo
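
A hedged sketch of the dual-engine pattern this diff introduces: one AnalysisConfig (which extends NativeConfig, per the diff) configures both a native and an analysis predictor, and the same input batch is run through each. The helper name RunBoth, the input shape, and the values are invented for illustration only.

#include <string>
#include <vector>
#include "paddle/include/paddle_inference_api.h"

int RunBoth(const std::string& modeldir) {
  paddle::contrib::AnalysisConfig config;
  config.param_file = modeldir + "/__params__";
  config.prog_file = modeldir + "/__model__";
  config.use_gpu = false;

  // The same config object can create either engine, since AnalysisConfig
  // derives from NativeConfig.
  auto native_predictor =
      paddle::CreatePaddlePredictor<paddle::NativeConfig>(config);
  auto analysis_predictor =
      paddle::CreatePaddlePredictor<paddle::contrib::AnalysisConfig>(config);

  std::vector<float> data(1 * 3 * 224 * 224, 0.f);  // placeholder input
  paddle::PaddleTensor input;
  input.shape = {1, 3, 224, 224};
  input.data = paddle::PaddleBuf(data.data(), data.size() * sizeof(float));
  input.dtype = paddle::PaddleDType::FLOAT32;

  // Run the identical batch through both engines; outputs may differ
  // slightly, which is why the demo defers CheckOutput for the analysis path.
  std::vector<paddle::PaddleTensor> output, analysis_output;
  native_predictor->Run({input}, &output, 1);
  analysis_predictor->Run({input}, &analysis_output, 1);
  return (output.empty() || analysis_output.empty()) ? 1 : 0;
}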
