
Commit 07f495e

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into fix_python35_CI_random_fail

2 parents: 2bb0ac9 + f76f42c

File tree

12 files changed, 153 insertions(+), 82 deletions(-)

CMakeLists.txt

Lines changed: 3 additions & 1 deletion
@@ -213,9 +213,11 @@ include(configure)  # add paddle env configuration
 if(WITH_GPU)
     include(cuda)
     include(tensorrt)
+endif()
+if(WITH_MKL OR WITH_MKLML)
     include(external/anakin)
 elseif()
-    set(WITH_ANAKIN OFF CACHE STRING "Anakin is used in GPU only now." FORCE)
+    set(WITH_ANAKIN OFF CACHE STRING "Anakin is used in MKL only now." FORCE)
 endif()

 include(generic)  # simplify cmake module

cmake/external/anakin.cmake

Lines changed: 6 additions & 13 deletions
@@ -16,16 +16,6 @@ set(ANAKIN_LIBRARY ${ANAKIN_INSTALL_DIR})
 set(ANAKIN_SHARED_LIB ${ANAKIN_LIBRARY}/libanakin.so)
 set(ANAKIN_SABER_LIB ${ANAKIN_LIBRARY}/libanakin_saber_common.so)

-# TODO(luotao): ANAKIN_MODLE_URL etc will move to demo ci later.
-set(INFERENCE_URL "http://paddle-inference-dist.bj.bcebos.com")
-set(ANAKIN_MODLE_URL "${INFERENCE_URL}/mobilenet_v2.anakin.bin")
-set(ANAKIN_RNN_MODLE_URL "${INFERENCE_URL}/anakin_test%2Fditu_rnn.anakin2.model.bin")
-set(ANAKIN_RNN_DATA_URL "${INFERENCE_URL}/anakin_test%2Fditu_rnn_data.txt")
-execute_process(COMMAND bash -c "mkdir -p ${ANAKIN_SOURCE_DIR}")
-execute_process(COMMAND bash -c "cd ${ANAKIN_SOURCE_DIR}; wget -q --no-check-certificate ${ANAKIN_MODLE_URL} -N")
-execute_process(COMMAND bash -c "cd ${ANAKIN_SOURCE_DIR}; wget -q --no-check-certificate ${ANAKIN_RNN_MODLE_URL} -N")
-execute_process(COMMAND bash -c "cd ${ANAKIN_SOURCE_DIR}; wget -q --no-check-certificate ${ANAKIN_RNN_DATA_URL} -N")
-
 include_directories(${ANAKIN_INCLUDE})
 include_directories(${ANAKIN_INCLUDE}/saber/)
 include_directories(${ANAKIN_INCLUDE}/saber/core/)

@@ -48,6 +38,11 @@ set(ANAKIN_COMPILE_EXTRA_FLAGS
     -Wno-reorder
     -Wno-error=cpp)

+if(WITH_GPU)
+    set(CMAKE_ARGS_PREFIX -DUSE_GPU_PLACE=YES -DCUDNN_ROOT=${CUDNN_ROOT} -DCUDNN_INCLUDE_DIR=${CUDNN_INCLUDE_DIR})
+else()
+    set(CMAKE_ARGS_PREFIX -DUSE_GPU_PLACE=NO)
+endif()
 ExternalProject_Add(
     extern_anakin
     ${EXTERNAL_PROJECT_LOG_ARGS}

@@ -56,13 +51,11 @@ ExternalProject_Add(
     GIT_TAG "9424277cf9ae180a14aff09560d3cd60a49c76d2"
     PREFIX ${ANAKIN_SOURCE_DIR}
     UPDATE_COMMAND ""
-    CMAKE_ARGS -DUSE_GPU_PLACE=YES
+    CMAKE_ARGS ${CMAKE_ARGS_PREFIX}
                -DUSE_X86_PLACE=YES
                -DBUILD_WITH_UNIT_TEST=NO
                -DPROTOBUF_ROOT=${THIRD_PARTY_PATH}/install/protobuf
                -DMKLML_ROOT=${THIRD_PARTY_PATH}/install/mklml
-               -DCUDNN_ROOT=${CUDNN_ROOT}
-               -DCUDNN_INCLUDE_DIR=${CUDNN_INCLUDE_DIR}
                -DENABLE_OP_TIMER=${ANAKIN_ENABLE_OP_TIMER}
                ${EXTERNAL_OPTIONAL_ARGS}
     CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${ANAKIN_INSTALL_DIR}

cmake/inference_lib.cmake

Lines changed: 1 addition & 1 deletion
@@ -145,7 +145,7 @@ copy(memory_lib
 set(inference_deps paddle_fluid_shared paddle_fluid)

 set(module "inference/api")
-if (WITH_ANAKIN AND WITH_GPU)
+if (WITH_ANAKIN AND WITH_MKL)
     copy(anakin_inference_lib DEPS paddle_inference_api inference_anakin_api
         SRCS
         ${PADDLE_BINARY_DIR}/paddle/fluid/inference/api/libinference_anakin_api* # compiled anakin api

doc/fluid/api/layers.rst

Lines changed: 16 additions & 0 deletions
@@ -822,6 +822,14 @@ pad
 .. autofunction:: paddle.fluid.layers.pad
   :noindex:

+.. _api_fluid_layers_pad_constant_like:
+
+pad_constant_like
+---
+
+.. autofunction:: paddle.fluid.layers.pad_constant_like
+  :noindex:
+
 .. _api_fluid_layers_label_smooth:

 label_smooth

@@ -1145,6 +1153,14 @@ sigmoid
 .. autofunction:: paddle.fluid.layers.sigmoid
   :noindex:

+.. _api_fluid_layers_hsigmoid:
+
+hsigmoid
+-------
+
+.. autofunction:: paddle.fluid.layers.hsigmoid
+  :noindex:
+
 .. _api_fluid_layers_logsigmoid:

 logsigmoid

doc/survey/dynamic_graph.md

Lines changed: 1 addition & 1 deletion
@@ -30,7 +30,7 @@ x = Variable(randn(20, 1)))
 label = Variable(randint(1))
 W_1, W_2 = Variable(randn(20, 20)), Variable(randn(10, 20))
 h = matmul(W_1, x)
-pred = matmul(W_2, x)
+pred = matmul(W_2, h)
 loss = softmax(pred, label)
 loss.backward()
 ```

paddle/fluid/framework/ir/CMakeLists.txt

Lines changed: 18 additions & 11 deletions
@@ -1,14 +1,21 @@
 set(pass_file ${PADDLE_BINARY_DIR}/paddle/fluid/inference/api/paddle_inference_pass.h)
 file(WRITE ${pass_file} "// Generated by the paddle/fluid/framework/ir/CMakeLists.txt.  DO NOT EDIT!\n\n")
 file(APPEND ${pass_file} "\#include \"paddle/fluid/framework/ir/pass.h\"\n")
-function(pass_library TARGET)
+
+
+# Usage: pass_library(target inference) will append to paddle_inference_pass.h
+function(pass_library TARGET DEST)
     set(options "")
     set(oneValueArgs "")
     set(multiValueArgs SRCS DEPS)
     cmake_parse_arguments(op_library "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
-    cc_library(${TARGET} SRCS ${TARGET}.cc DEPS graph_pattern_detector pass)
-    file(APPEND ${pass_file} "USE_PASS(${TARGET});\n")
-    set(PASS_LIBRARY ${TARGET} ${PASS_LIBRARY} PARENT_SCOPE)
+    cc_library(${TARGET} SRCS ${TARGET}.cc DEPS graph_pattern_detector pass ${op_library_DEPS})
+    # add more DEST here, such as train, dist and collect USE_PASS into a file automatically.
+    if (${DEST} STREQUAL "base" OR ${DEST} STREQUAL "inference")
+        message(STATUS "add pass ${TARGET} ${DEST}")
+        file(APPEND ${pass_file} "USE_PASS(${TARGET});\n")
+        set(PASS_LIBRARY ${TARGET} ${PASS_LIBRARY} PARENT_SCOPE)
+    endif()
 endfunction()

 cc_library(node SRCS node.cc DEPS proto_desc)

@@ -18,13 +25,13 @@ cc_library(pass SRCS pass.cc DEPS graph node graph_helper)
 cc_library(graph_traits SRCS graph_traits.cc DEPS graph)
 cc_library(graph_pattern_detector SRCS graph_pattern_detector.cc DEPS graph graph_helper graph_traits)

-pass_library(graph_to_program_pass)
-pass_library(graph_viz_pass)
-pass_library(fc_fuse_pass)
-pass_library(attention_lstm_fuse_pass)
-pass_library(infer_clean_graph_pass)
-pass_library(fc_lstm_fuse_pass)
-pass_library(seq_concat_fc_fuse_pass)
+pass_library(graph_to_program_pass base)
+pass_library(graph_viz_pass base)
+pass_library(fc_fuse_pass inference)
+pass_library(attention_lstm_fuse_pass inference)
+pass_library(infer_clean_graph_pass inference)
+pass_library(fc_lstm_fuse_pass inference)
+pass_library(seq_concat_fc_fuse_pass inference)
 set(GLOB_PASS_LIB ${PASS_LIBRARY} CACHE INTERNAL "Global PASS library")

 cc_test(pass_test SRCS pass_test.cc DEPS graph pass graph_helper)
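
Note: for reference, this is the shape of the generated header that the file(WRITE)/file(APPEND) calls above assemble. The USE_PASS lines come from the pass_library calls whose DEST is "base" or "inference"; this is a sketch of the generated output, not a file checked into the repo.

// paddle_inference_pass.h, as assembled by this CMakeLists.txt
// Generated by the paddle/fluid/framework/ir/CMakeLists.txt.  DO NOT EDIT!

#include "paddle/fluid/framework/ir/pass.h"
USE_PASS(graph_to_program_pass);
USE_PASS(graph_viz_pass);
USE_PASS(fc_fuse_pass);
USE_PASS(attention_lstm_fuse_pass);
USE_PASS(infer_clean_graph_pass);
USE_PASS(fc_lstm_fuse_pass);
USE_PASS(seq_concat_fc_fuse_pass);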

paddle/fluid/inference/analysis/CMakeLists.txt

Lines changed: 6 additions & 1 deletion
@@ -100,12 +100,17 @@ inference_analysis_test(test_analyzer_lac SRCS analyzer_lac_tester.cc


 set(TEXT_CLASSIFICATION_MODEL_URL "http://paddle-inference-dist.bj.bcebos.com/text-classification-Senta.tar.gz")
+set(TEXT_CLASSIFICATION_DATA_URL "http://paddle-inference-dist.bj.bcebos.com/text_classification_data.txt.tar.gz")
 set(TEXT_CLASSIFICATION_INSTALL_DIR "${THIRD_PARTY_PATH}/inference_demo/text_classification" CACHE PATH "Text Classification model and data root." FORCE)

 if (NOT EXISTS ${TEXT_CLASSIFICATION_INSTALL_DIR} AND WITH_TESTING AND WITH_INFERENCE)
     inference_download_and_uncompress(${TEXT_CLASSIFICATION_INSTALL_DIR} ${TEXT_CLASSIFICATION_MODEL_URL} "text-classification-Senta.tar.gz")
+    inference_download_and_uncompress(${TEXT_CLASSIFICATION_INSTALL_DIR} ${TEXT_CLASSIFICATION_DATA_URL} "text_classification_data.txt.tar.gz")
 endif()

 inference_analysis_test(test_text_classification SRCS analyzer_text_classification_tester.cc
     EXTRA_DEPS paddle_inference_api paddle_fluid_api analysis_predictor
-    ARGS --infer_model=${TEXT_CLASSIFICATION_INSTALL_DIR}/text-classification-Senta)
+    ARGS --infer_model=${TEXT_CLASSIFICATION_INSTALL_DIR}/text-classification-Senta
+         --infer_data=${TEXT_CLASSIFICATION_INSTALL_DIR}/data.txt
+         --topn=1  # Just run top 1 batch.
+    )

paddle/fluid/inference/analysis/analyzer_lac_tester.cc

Lines changed: 2 additions & 8 deletions
@@ -114,12 +114,6 @@ void GetOneBatch(std::vector<PaddleTensor> *input_slots, DataRecord *data,
   PADDLE_ENFORCE_EQ(batch_size, static_cast<int>(one_batch.lod.size() - 1));
   input_slots->assign({input_tensor});
 }
-static void PrintTime(const double latency, const int bs, const int repeat) {
-  LOG(INFO) << "===========profile result===========";
-  LOG(INFO) << "batch_size: " << bs << ", repeat: " << repeat
-            << ", avg latency: " << latency / repeat << "ms";
-  LOG(INFO) << "=====================================";
-}
 void BenchAllData(const std::string &model_path, const std::string &data_file,
                   const int batch_size, const int repeat) {
   NativeConfig config;

@@ -145,7 +139,7 @@ void BenchAllData(const std::string &model_path, const std::string &data_file,
       sum += timer.toc();
     }
   }
-  PrintTime(sum, batch_size, repeat);
+  PrintTime(batch_size, repeat, 1, 0, sum / repeat);
 }
 const int64_t lac_ref_data[] = {24, 25, 25, 25, 38, 30, 31, 14, 15, 44, 24, 25,
                                 25, 25, 25, 25, 44, 24, 25, 25, 25, 36, 42, 43,

@@ -176,7 +170,7 @@ void TestLACPrediction(const std::string &model_path,
   for (int i = 0; i < repeat; i++) {
     predictor->Run(input_slots, &outputs_slots);
   }
-  PrintTime(timer.toc(), batch_size, repeat);
+  PrintTime(batch_size, repeat, 1, 0, timer.toc() / repeat);
   EXPECT_EQ(outputs_slots.size(), 1UL);
   auto &out = outputs_slots[0];
   size_t size = std::accumulate(out.shape.begin(), out.shape.end(), 1,
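
Note: the local PrintTime removed above is replaced by a shared helper (pulled in via paddle/fluid/inference/api/helper.h in the text-classification tester below). A minimal sketch of what the call sites imply, assuming the parameter order is (batch_size, repeat, num_threads, tid, average latency in ms); the real helper may differ.

// Hypothetical reconstruction of the shared PrintTime helper. The signature
// is inferred from call sites like PrintTime(batch_size, repeat, 1, 0, sum / repeat).
#include <glog/logging.h>

static void PrintTime(int batch_size, int repeat, int num_threads, int tid,
                      double latency) {
  // One consolidated profile line instead of the per-tester banner blocks.
  LOG(INFO) << "====== batch_size: " << batch_size << ", repeat: " << repeat
            << ", threads: " << num_threads << ", thread id: " << tid
            << ", avg latency: " << latency << "ms ======";
}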

paddle/fluid/inference/analysis/analyzer_ner_tester.cc

Lines changed: 28 additions & 8 deletions
@@ -25,6 +25,7 @@ DEFINE_string(infer_model, "", "model path");
 DEFINE_string(infer_data, "", "data path");
 DEFINE_int32(batch_size, 10, "batch size.");
 DEFINE_int32(repeat, 1, "Running the inference program repeat times.");
+DEFINE_bool(test_all_data, false, "Test the all dataset in data file.");

 namespace paddle {
 namespace inference {

@@ -35,6 +36,7 @@ struct DataRecord {
   std::vector<size_t> lod;  // two inputs have the same lod info.
   size_t batch_iter{0};
   size_t batch_size{1};
+  size_t num_samples;  // total number of samples
   DataRecord() = default;
   explicit DataRecord(const std::string &path, int batch_size = 1)
       : batch_size(batch_size) {

@@ -81,6 +83,7 @@ struct DataRecord {
       word_data_all.push_back(std::move(word_data));
       mention_data_all.push_back(std::move(mention_data));
     }
+    num_samples = num_lines;
   }
 };

@@ -120,21 +123,38 @@ void TestChineseNERPrediction() {
   auto predictor =
       CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
   std::vector<PaddleTensor> input_slots;
-  DataRecord data(FLAGS_infer_data, FLAGS_batch_size);
+  std::vector<PaddleTensor> outputs;
+  Timer timer;
+
+  if (FLAGS_test_all_data) {
+    LOG(INFO) << "test all data";
+    double sum = 0;
+    size_t num_samples;
+    for (int i = 0; i < FLAGS_repeat; i++) {
+      DataRecord data(FLAGS_infer_data, FLAGS_batch_size);
+      num_samples = data.num_samples;
+      for (size_t bid = 0; bid < num_samples; ++bid) {
+        PrepareInputs(&input_slots, &data, FLAGS_batch_size);
+        timer.tic();
+        predictor->Run(input_slots, &outputs);
+        sum += timer.toc();
+      }
+    }
+    LOG(INFO) << "total number of samples: " << num_samples;
+    PrintTime(FLAGS_batch_size, FLAGS_repeat, 1, 0, sum / FLAGS_repeat);
+    LOG(INFO) << "average latency of each sample: "
+              << sum / FLAGS_repeat / num_samples;
+    return;
+  }
   // Prepare inputs.
+  DataRecord data(FLAGS_infer_data, FLAGS_batch_size);
   PrepareInputs(&input_slots, &data, FLAGS_batch_size);
-  std::vector<PaddleTensor> outputs;

-  Timer timer;
   timer.tic();
   for (int i = 0; i < FLAGS_repeat; i++) {
     predictor->Run(input_slots, &outputs);
   }
-  LOG(INFO) << "===========profile result===========";
-  LOG(INFO) << "batch_size: " << FLAGS_batch_size
-            << ", repeat: " << FLAGS_repeat
-            << ", latency: " << timer.toc() / FLAGS_repeat << "ms";
-  LOG(INFO) << "=====================================";
+  PrintTime(FLAGS_batch_size, FLAGS_repeat, 1, 0, timer.toc() / FLAGS_repeat);

   PADDLE_ENFORCE(outputs.size(), 1UL);
   auto &out = outputs[0];
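
Note: the two averages logged in the test_all_data branch are plain arithmetic over the accumulated Run() time; a sketch with hypothetical numbers to make the units explicit.

// Hypothetical numbers: sum = 500 ms accumulated across all Run() calls,
// FLAGS_repeat = 2 full passes, num_samples = 100 lines per pass.
double sum = 500.0;        // total milliseconds spent inside Run()
int repeat = 2;            // FLAGS_repeat
size_t num_samples = 100;  // lines read from the data file
double per_pass = sum / repeat;              // 250 ms, what PrintTime receives
double per_sample = per_pass / num_samples;  // 2.5 ms, the "average latency of each sample"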

paddle/fluid/inference/analysis/analyzer_text_classification_tester.cc

Lines changed: 48 additions & 33 deletions
@@ -16,8 +16,10 @@
 #include <gflags/gflags.h>
 #include <glog/logging.h>  // use glog instead of PADDLE_ENFORCE to avoid importing other paddle header files.
 #include <gtest/gtest.h>
+#include <fstream>
 #include "paddle/fluid/framework/ir/pass.h"
 #include "paddle/fluid/inference/analysis/ut_helper.h"
+#include "paddle/fluid/inference/api/helper.h"
 #include "paddle/fluid/inference/api/paddle_inference_api.h"
 #include "paddle/fluid/inference/api/paddle_inference_pass.h"
 #include "paddle/fluid/inference/api/timer.h"

@@ -26,60 +28,72 @@ DEFINE_string(infer_model, "", "Directory of the inference model.");
 DEFINE_string(infer_data, "", "Path of the dataset.");
 DEFINE_int32(batch_size, 1, "batch size.");
 DEFINE_int32(repeat, 1, "How many times to repeat run.");
+DEFINE_int32(topn, -1, "Run top n batches of data to save time");

 namespace paddle {
+namespace inference {

-template <typename T>
-std::string to_string(const std::vector<T> &vec) {
-  std::stringstream ss;
-  for (const auto &c : vec) {
-    ss << c << " ";
-  }
-  return ss.str();
-}
+struct DataReader {
+  explicit DataReader(const std::string &path)
+      : file(new std::ifstream(path)) {}

-void PrintTime(const double latency, const int bs, const int repeat) {
-  LOG(INFO) << "===========profile result===========";
-  LOG(INFO) << "batch_size: " << bs << ", repeat: " << repeat
-            << ", avg latency: " << latency / repeat << "ms";
-  LOG(INFO) << "=====================================";
-}
+  bool NextBatch(PaddleTensor *tensor, int batch_size) {
+    PADDLE_ENFORCE_EQ(batch_size, 1);
+    std::string line;
+    tensor->lod.clear();
+    tensor->lod.emplace_back(std::vector<size_t>({0}));
+    std::vector<int64_t> data;

-void Main(int batch_size) {
-  // Three sequence inputs.
-  std::vector<PaddleTensor> input_slots(1);
-  // one batch starts
-  // data --
-  int64_t data0[] = {0, 1, 2};
-  for (auto &input : input_slots) {
-    input.data.Reset(data0, sizeof(data0));
-    input.shape = std::vector<int>({3, 1});
-    // dtype --
-    input.dtype = PaddleDType::INT64;
-    // LoD --
-    input.lod = std::vector<std::vector<size_t>>({{0, 3}});
+    for (int i = 0; i < batch_size; i++) {
+      if (!std::getline(*file, line)) return false;
+      inference::split_to_int64(line, ' ', &data);
+    }
+    tensor->lod.front().push_back(data.size());
+
+    tensor->data.Resize(data.size() * sizeof(int64_t));
+    memcpy(tensor->data.data(), data.data(), data.size() * sizeof(int64_t));
+    tensor->shape.clear();
+    tensor->shape.push_back(data.size());
+    tensor->shape.push_back(1);
+    return true;
   }

+  std::unique_ptr<std::ifstream> file;
+};
+
+void Main(int batch_size) {
   // shape --
   // Create Predictor --
   AnalysisConfig config;
   config.model_dir = FLAGS_infer_model;
   config.use_gpu = false;
   config.enable_ir_optim = true;
-  config.ir_passes.push_back("fc_lstm_fuse_pass");
   auto predictor =
       CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
           config);

+  std::vector<PaddleTensor> input_slots(1);
+  // one batch starts
+  // data --
+  auto &input = input_slots[0];
+  input.dtype = PaddleDType::INT64;
+
   inference::Timer timer;
   double sum = 0;
   std::vector<PaddleTensor> output_slots;
-  for (int i = 0; i < FLAGS_repeat; i++) {
-    timer.tic();
-    CHECK(predictor->Run(input_slots, &output_slots));
-    sum += timer.toc();
+
+  int num_batches = 0;
+  for (int t = 0; t < FLAGS_repeat; t++) {
+    DataReader reader(FLAGS_infer_data);
+    while (reader.NextBatch(&input, FLAGS_batch_size)) {
+      if (FLAGS_topn > 0 && num_batches > FLAGS_topn) break;
+      timer.tic();
+      CHECK(predictor->Run(input_slots, &output_slots));
+      sum += timer.toc();
+      ++num_batches;
+    }
   }
-  PrintTime(sum, batch_size, FLAGS_repeat);
+  PrintTime(batch_size, FLAGS_repeat, 1, 0, sum / FLAGS_repeat);

   // Get output
   LOG(INFO) << "get outputs " << output_slots.size();

@@ -100,4 +114,5 @@ void Main(int batch_size) {

 TEST(text_classification, basic) { Main(FLAGS_batch_size); }

+}  // namespace inference
 }  // namespace paddle
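
Note: to make the new DataReader concrete, here is a worked example of the tensor NextBatch builds for a single input line. It mirrors the hard-coded batch {0, 1, 2} that the old Main() used; the file path is hypothetical.

// Assuming the first line of the data file is "0 1 2":
paddle::PaddleTensor input;
input.dtype = paddle::PaddleDType::INT64;
paddle::inference::DataReader reader("/path/to/data.txt");  // hypothetical path
if (reader.NextBatch(&input, /*batch_size=*/1)) {
  // input.shape == {3, 1}    -- three int64 ids in one column
  // input.lod   == {{0, 3}}  -- a single sequence spanning all three ids
  // input.data  holds the raw bytes of {0, 1, 2} as int64_t
}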
