Commit d3b6291 (2 parents: 76b49f0 + bbb654e)

Merge pull request #16643 from sfraczek/fix-repeating-passes

Cherry-pick of #16559, #16606 and #16608

23 files changed, 378 additions and 254 deletions

paddle/fluid/inference/api/analysis_config.cc

Lines changed: 3 additions & 10 deletions
@@ -141,7 +141,6 @@ AnalysisConfig::AnalysisConfig(const AnalysisConfig &other) {
 
 void AnalysisConfig::EnableMKLDNN() {
 #ifdef PADDLE_WITH_MKLDNN
-  pass_builder()->EnableMKLDNN();
   use_mkldnn_ = true;
 #else
   LOG(ERROR) << "Please compile with MKLDNN first to use MKLDNN";

@@ -234,16 +233,13 @@ void AnalysisConfig::Update() {
   }
 
   if (use_mkldnn_) {
+#ifdef PADDLE_WITH_MKLDNN
     if (!enable_ir_optim_) {
       LOG(ERROR)
           << "EnableMKLDNN() only works when IR optimization is enabled.";
+    } else {
+      pass_builder()->EnableMKLDNN();
     }
-#ifdef PADDLE_WITH_MKLDNN
-    pass_builder()->EnableMKLDNN();
-    use_mkldnn_ = true;
-#else
-    LOG(ERROR) << "Please compile with MKLDNN first to use MKLDNN";
-    use_mkldnn_ = false;
 #endif
   }

@@ -255,9 +251,6 @@ void AnalysisConfig::Update() {
     }
 #ifdef PADDLE_WITH_MKLDNN
     pass_builder()->EnableMkldnnQuantizer();
-#else
-    LOG(ERROR) << "Please compile with MKLDNN first to use MkldnnQuantizer";
-    use_mkldnn_quantizer_ = false;
 #endif
   }
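Note: the net effect of these hunks is that EnableMKLDNN() now only records intent, while the MKL-DNN passes are appended in exactly one place, inside Update(), and only when IR optimization is on. Below is a minimal, self-contained sketch of that guard pattern (toy types, not Paddle's actual classes), assuming the goal is that repeated Enable/Update calls never duplicate a pass:

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

// Toy stand-in for the config/pass-builder pair: Enable* only sets a flag;
// Update() mutates the pass list in one place, guarded for idempotence.
struct ToyConfig {
  std::vector<std::string> passes{"infer_clean_graph_pass"};
  bool use_mkldnn{false};

  void EnableMKLDNN() { use_mkldnn = true; }  // record intent only

  void Update() {
    bool already_placed =
        std::find(passes.begin(), passes.end(), "mkldnn_placement_pass") !=
        passes.end();
    if (use_mkldnn && !already_placed) {
      passes.insert(passes.begin(), "mkldnn_placement_pass");
    }
  }
};

int main() {
  ToyConfig cfg;
  cfg.EnableMKLDNN();
  cfg.Update();
  cfg.Update();  // re-running Update() must not add the pass again
  std::cout << cfg.passes.size() << "\n";  // prints 2
}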

paddle/fluid/inference/api/helper.h

Lines changed: 11 additions & 10 deletions
@@ -27,6 +27,7 @@
 #include <string>
 #include <vector>
 #include "paddle/fluid/inference/api/paddle_inference_api.h"
+#include "paddle/fluid/platform/enforce.h"
 #include "paddle/fluid/platform/port.h"
 #include "paddle/fluid/string/printf.h"
 

@@ -266,17 +267,17 @@ static std::string DescribeZeroCopyTensor(const ZeroCopyTensor &tensor) {
 }
 
 static void PrintTime(int batch_size, int repeat, int num_threads, int tid,
-                      double latency, int epoch = 1) {
-  LOG(INFO) << "====== batch_size: " << batch_size << ", repeat: " << repeat
-            << ", threads: " << num_threads << ", thread id: " << tid
-            << ", latency: " << latency << "ms, fps: " << 1 / (latency / 1000.f)
+                      double batch_latency, int epoch = 1) {
+  PADDLE_ENFORCE(batch_size > 0, "Non-positive batch size.");
+  double sample_latency = batch_latency / batch_size;
+  LOG(INFO) << "====== threads: " << num_threads << ", thread id: " << tid
             << " ======";
-  if (epoch > 1) {
-    int samples = batch_size * epoch;
-    LOG(INFO) << "====== sample number: " << samples
-              << ", average latency of each sample: " << latency / samples
-              << "ms ======";
-  }
+  LOG(INFO) << "====== batch_size: " << batch_size << ", iterations: " << epoch
+            << ", repetitions: " << repeat << " ======";
+  LOG(INFO) << "====== batch latency: " << batch_latency
+            << "ms, number of samples: " << batch_size * epoch
+            << ", sample latency: " << sample_latency
+            << "ms, fps: " << 1000.f / sample_latency << " ======";
 }
 
 static bool IsFileExists(const std::string &path) {
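Note: the reworked PrintTime derives per-sample figures from the batch latency instead of logging a raw latency. With illustrative values (not taken from the diff), a 50-sample batch averaging 25 ms gives sample_latency = 25 / 50 = 0.5 ms and fps = 1000 / 0.5 = 2000. A minimal sketch of that arithmetic:

#include <iostream>

int main() {
  // Illustrative numbers only, mirroring the arithmetic in PrintTime.
  int batch_size = 50;
  double batch_latency = 25.0;                         // ms per batch
  double sample_latency = batch_latency / batch_size;  // 0.5 ms per sample
  double fps = 1000.0 / sample_latency;                // 2000 samples/s
  std::cout << sample_latency << " ms/sample, " << fps << " fps\n";
}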

paddle/fluid/inference/api/paddle_pass_builder.cc

Lines changed: 43 additions & 7 deletions
@@ -64,10 +64,12 @@ void PaddlePassBuilder::DeletePass(size_t idx) {
   passes_.erase(std::begin(passes_) + idx);
 }
 
-void GpuPassStrategy::EnableMKLDNN() {
-  LOG(ERROR) << "GPU not support MKLDNN yet";
+void PaddlePassBuilder::AppendAnalysisPass(const std::string &pass) {
+  analysis_passes_.push_back(pass);
 }
 
+void PaddlePassBuilder::ClearPasses() { passes_.clear(); }
+
 // The following passes works for Anakin sub-graph engine.
 const std::vector<std::string> kAnakinSubgraphPasses({
     "infer_clean_graph_pass",  //

@@ -102,12 +104,12 @@ GpuPassStrategy::GpuPassStrategy() : PassStrategy({}) {
   use_gpu_ = true;
 }
 
-void GpuPassStrategy::EnableMkldnnQuantizer() {
-  LOG(ERROR) << "GPU not support MKL-DNN quantization";
+void GpuPassStrategy::EnableMKLDNN() {
+  LOG(ERROR) << "GPU not support MKLDNN yet";
 }
 
-void PaddlePassBuilder::AppendAnalysisPass(const std::string &pass) {
-  analysis_passes_.push_back(pass);
+void GpuPassStrategy::EnableMkldnnQuantizer() {
+  LOG(ERROR) << "GPU not support MKL-DNN quantization";
 }
 
 CpuPassStrategy::CpuPassStrategy() : PassStrategy({}) {

@@ -135,5 +137,39 @@ CpuPassStrategy::CpuPassStrategy() : PassStrategy({}) {
   });
   use_gpu_ = false;
 }
-void PaddlePassBuilder::ClearPasses() { passes_.clear(); }
+
+void CpuPassStrategy::EnableMKLDNN() {
+// TODO(Superjomn) Consider the way to mix CPU with GPU.
+#ifdef PADDLE_WITH_MKLDNN
+  if (!use_mkldnn_) {
+    passes_.insert(passes_.begin(), "mkldnn_placement_pass");
+
+    for (auto &pass : std::vector<std::string>(
+             {"depthwise_conv_mkldnn_pass",    //
+              "conv_bn_fuse_pass",             // Execute BN passes again to
+              "conv_eltwiseadd_bn_fuse_pass",  // preserve correct pass order
+              "conv_bias_mkldnn_fuse_pass",    //
+              "conv3d_bias_mkldnn_fuse_pass",  //
+              "conv_elementwise_add_mkldnn_fuse_pass",
+              "conv_relu_mkldnn_fuse_pass"})) {
+      passes_.push_back(pass);
+    }
+  }
+  use_mkldnn_ = true;
+#else
+  use_mkldnn_ = false;
+#endif
+}
+
+void CpuPassStrategy::EnableMkldnnQuantizer() {
+#ifdef PADDLE_WITH_MKLDNN
+  if (!use_mkldnn_quantizer_) {
+    passes_.push_back("cpu_quantize_placement_pass");
+  }
+  use_mkldnn_quantizer_ = true;
+#else
+  use_mkldnn_quantizer_ = false;
+#endif
+}
+
 } // namespace paddle
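Note: the if (!use_mkldnn_) and if (!use_mkldnn_quantizer_) guards are what make these calls safe to repeat, which is the "fix repeating passes" of the PR title. A hedged usage sketch follows; it assumes a Paddle build with PADDLE_WITH_MKLDNN, and CpuPassStrategy/AllPasses() are the APIs shown in this diff:

#include <iostream>
#include "paddle/fluid/inference/api/paddle_pass_builder.h"

int main() {
  paddle::CpuPassStrategy strategy;
  strategy.EnableMKLDNN();
  strategy.EnableMKLDNN();  // second call is a no-op: use_mkldnn_ already set
  std::cout << strategy.AllPasses().size() << " passes\n";
}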

paddle/fluid/inference/api/paddle_pass_builder.h

Lines changed: 7 additions & 34 deletions
@@ -109,43 +109,16 @@ class CpuPassStrategy : public PassStrategy {
   CpuPassStrategy();
 
   explicit CpuPassStrategy(const CpuPassStrategy &other)
-      : PassStrategy(other.AllPasses()) {}
+      : PassStrategy(other.AllPasses()) {
+    use_gpu_ = other.use_gpu_;
+    use_mkldnn_ = other.use_mkldnn_;
+    use_mkldnn_quantizer_ = other.use_mkldnn_quantizer_;
+  }
 
   virtual ~CpuPassStrategy() = default;
 
-  void EnableMKLDNN() override {
-// TODO(Superjomn) Consider the way to mix CPU with GPU.
-#ifdef PADDLE_WITH_MKLDNN
-    if (!use_mkldnn_) {
-      passes_.insert(passes_.begin(), "mkldnn_placement_pass");
-
-      for (auto &pass : std::vector<std::string>(
-               {"depthwise_conv_mkldnn_pass",    //
-                "conv_bn_fuse_pass",             // Execute BN passes again to
-                "conv_eltwiseadd_bn_fuse_pass",  // preserve correct pass order
-                "conv_bias_mkldnn_fuse_pass",    //
-                "conv3d_bias_mkldnn_fuse_pass",  //
-                "conv_relu_mkldnn_fuse_pass",    //
-                "conv_elementwise_add_mkldnn_fuse_pass"})) {
-        passes_.push_back(pass);
-      }
-    }
-    use_mkldnn_ = true;
-#else
-    use_mkldnn_ = false;
-#endif
-  }
-
-  void EnableMkldnnQuantizer() override {
-#ifdef PADDLE_WITH_MKLDNN
-    if (!use_mkldnn_quantizer_) {
-      passes_.push_back("cpu_quantize_placement_pass");
-    }
-    use_mkldnn_quantizer_ = true;
-#else
-    use_mkldnn_quantizer_ = false;
-#endif
-  }
+  void EnableMKLDNN() override;
+  void EnableMkldnnQuantizer() override;
 
  protected:
   bool use_mkldnn_quantizer_{false};
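Note: the old copy constructor carried over only the pass list, so the use_* flags reset to false on every copy; a copied config could then re-append the MKL-DNN passes, one source of the repeated passes this PR fixes. A minimal sketch of that bug class with toy types (not Paddle's):

#include <cassert>
#include <string>
#include <vector>

struct ToyStrategy {
  std::vector<std::string> passes;
  bool use_mkldnn{false};

  ToyStrategy() = default;
  // Fixed form: copy the flag together with the pass list. Dropping
  // use_mkldnn from this initializer list reproduces the original bug.
  ToyStrategy(const ToyStrategy &other)
      : passes(other.passes), use_mkldnn(other.use_mkldnn) {}
};

int main() {
  ToyStrategy a;
  a.use_mkldnn = true;
  a.passes.push_back("mkldnn_placement_pass");

  ToyStrategy b(a);
  assert(b.use_mkldnn);  // false with the old ctor -> passes re-added later
}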

paddle/fluid/inference/tests/api/CMakeLists.txt

Lines changed: 9 additions & 5 deletions
@@ -26,7 +26,11 @@ endfunction()
 function(inference_analysis_api_int8_test target model_dir data_dir filename)
   inference_analysis_test(${target} SRCS ${filename}
           EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} benchmark
-          ARGS --infer_model=${model_dir}/model --infer_data=${data_dir}/data.bin --batch_size=100)
+          ARGS --infer_model=${model_dir}/model
+               --infer_data=${data_dir}/data.bin
+               --warmup_batch_size=100
+               --batch_size=50
+               --iterations=2)
 endfunction()
 
 function(inference_analysis_api_test_with_fake_data target install_dir filename model_name)

@@ -146,22 +150,22 @@ inference_analysis_api_test_with_fake_data(test_analyzer_mobilenet_depthwise_conv
 
 # int8 image classification tests
 if(WITH_MKLDNN)
-  set(INT8_DATA_DIR "${INFERENCE_DEMO_INSTALL_DIR}/int8")
+  set(INT8_DATA_DIR "${INFERENCE_DEMO_INSTALL_DIR}/int8v2")
   if (NOT EXISTS ${INT8_DATA_DIR})
-    inference_download_and_uncompress(${INT8_DATA_DIR} "https://paddle-inference-dist.bj.bcebos.com/int8" "imagenet_val_100.tar.gz")
+    inference_download_and_uncompress(${INT8_DATA_DIR} "${INFERENCE_URL}/int8" "imagenet_val_100_tail.tar.gz")
   endif()
 
   #resnet50 int8
   set(INT8_RESNET50_MODEL_DIR "${INT8_DATA_DIR}/resnet50")
   if (NOT EXISTS ${INT8_RESNET50_MODEL_DIR})
-    inference_download_and_uncompress(${INT8_RESNET50_MODEL_DIR} "https://paddle-inference-dist.bj.bcebos.com/int8" "resnet50_int8_model.tar.gz" )
+    inference_download_and_uncompress(${INT8_RESNET50_MODEL_DIR} "${INFERENCE_URL}/int8" "resnet50_int8_model.tar.gz" )
   endif()
   inference_analysis_api_int8_test(test_analyzer_int8_resnet50 ${INT8_RESNET50_MODEL_DIR} ${INT8_DATA_DIR} analyzer_int8_image_classification_tester.cc SERIAL)
 
   #mobilenet int8
   set(INT8_MOBILENET_MODEL_DIR "${INT8_DATA_DIR}/mobilenet")
   if (NOT EXISTS ${INT8_MOBILENET_MODEL_DIR})
-    inference_download_and_uncompress(${INT8_MOBILENET_MODEL_DIR} "https://paddle-inference-dist.bj.bcebos.com/int8" "mobilenetv1_int8_model.tar.gz" )
+    inference_download_and_uncompress(${INT8_MOBILENET_MODEL_DIR} "${INFERENCE_URL}/int8" "mobilenetv1_int8_model.tar.gz" )
   endif()
   inference_analysis_api_int8_test(test_analyzer_int8_mobilenet ${INT8_MOBILENET_MODEL_DIR} ${INT8_DATA_DIR} analyzer_int8_image_classification_tester.cc SERIAL)
 endif()

paddle/fluid/inference/tests/api/analyzer_bert_tester.cc

Lines changed: 1 addition & 1 deletion
@@ -154,7 +154,7 @@ void profile(bool use_mkldnn = false) {
     config.EnableMKLDNN();
   }
 
-  std::vector<PaddleTensor> outputs;
+  std::vector<std::vector<PaddleTensor>> outputs;
   std::vector<std::vector<PaddleTensor>> inputs;
   LoadInputData(&inputs);
   TestPrediction(reinterpret_cast<const PaddlePredictor::Config *>(&config),

paddle/fluid/inference/tests/api/analyzer_dam_tester.cc

Lines changed: 5 additions & 3 deletions
@@ -197,7 +197,7 @@ void profile(bool use_mkldnn = false) {
     cfg.SetMKLDNNOp(op_list);
   }
 
-  std::vector<PaddleTensor> outputs;
+  std::vector<std::vector<PaddleTensor>> outputs;
   std::vector<std::vector<PaddleTensor>> input_slots_all;
   SetInput(&input_slots_all);
 

@@ -206,9 +206,11 @@ void profile(bool use_mkldnn = false) {
 
   if (FLAGS_num_threads == 1 && !FLAGS_test_all_data) {
     PADDLE_ENFORCE_GT(outputs.size(), 0);
-    size_t size = GetSize(outputs[0]);
+    auto output = outputs.back();
+    PADDLE_ENFORCE_GT(output.size(), 0);
+    size_t size = GetSize(output[0]);
     PADDLE_ENFORCE_GT(size, 0);
-    float *result = static_cast<float *>(outputs[0].data.data());
+    float *result = static_cast<float *>(output[0].data.data());
     for (size_t i = 0; i < size; i++) {
       EXPECT_NEAR(result[i], result_data[i], 1e-3);
     }
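Note: outputs now holds one inner vector per profiling iteration, and the check reads the final iteration via outputs.back(). A small sketch of the container shape (toy tensors standing in for PaddleTensor):

#include <cassert>
#include <vector>

int main() {
  using Tensor = std::vector<float>;         // stand-in for PaddleTensor
  std::vector<std::vector<Tensor>> outputs;  // one entry per iteration
  outputs.push_back({Tensor{0.1f, 0.9f}});   // iteration 0
  outputs.push_back({Tensor{0.2f, 0.8f}});   // iteration 1

  auto &last = outputs.back();               // validate the last iteration
  assert(!last.empty());
  assert(last[0].size() == 2);
}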

paddle/fluid/inference/tests/api/analyzer_int8_image_classification_tester.cc

Lines changed: 29 additions & 24 deletions
@@ -17,8 +17,6 @@ limitations under the License. */
 #include "paddle/fluid/inference/api/paddle_analysis_config.h"
 #include "paddle/fluid/inference/tests/api/tester_helper.h"
 
-DEFINE_int32(iterations, 0, "Number of iterations");
-
 namespace paddle {
 namespace inference {
 namespace analysis {

@@ -30,8 +28,13 @@ void SetConfig(AnalysisConfig *cfg) {
   cfg->SwitchIrOptim();
   cfg->SwitchSpecifyInputNames(false);
   cfg->SetCpuMathLibraryNumThreads(FLAGS_paddle_num_threads);
-
   cfg->EnableMKLDNN();
+  cfg->pass_builder()->SetPasses(
+      {"infer_clean_graph_pass", "mkldnn_placement_pass",
+       "depthwise_conv_mkldnn_pass", "conv_bn_fuse_pass",
+       "conv_eltwiseadd_bn_fuse_pass", "conv_bias_mkldnn_fuse_pass",
+       "conv_elementwise_add_mkldnn_fuse_pass", "conv_relu_mkldnn_fuse_pass",
+       "fc_fuse_pass", "is_test_pass"});
 }
 
 template <typename T>

@@ -40,8 +43,8 @@ class TensorReader {
   TensorReader(std::ifstream &file, size_t beginning_offset,
                std::vector<int> shape, std::string name)
       : file_(file), position(beginning_offset), shape_(shape), name_(name) {
-    numel =
-        std::accumulate(shape_.begin(), shape_.end(), 1, std::multiplies<T>());
+    numel = std::accumulate(shape_.begin(), shape_.end(), size_t{1},
+                            std::multiplies<size_t>());
   }
 
   PaddleTensor NextBatch() {

@@ -71,10 +74,14 @@ class TensorReader {
 };
 
 std::shared_ptr<std::vector<PaddleTensor>> GetWarmupData(
-    const std::vector<std::vector<PaddleTensor>> &test_data, int num_images) {
+    const std::vector<std::vector<PaddleTensor>> &test_data,
+    int num_images = FLAGS_warmup_batch_size) {
   int test_data_batch_size = test_data[0][0].shape[0];
-  CHECK_LE(static_cast<size_t>(num_images),
-           test_data.size() * test_data_batch_size);
+  auto iterations_max = test_data.size();
+  PADDLE_ENFORCE(
+      static_cast<size_t>(num_images) <= iterations_max * test_data_batch_size,
+      "The requested quantization warmup data size " +
+          std::to_string(num_images) + " is bigger than all test data size.");
 
   PaddleTensor images;
   images.name = "input";

@@ -120,20 +127,17 @@ void SetInput(std::vector<std::vector<PaddleTensor>> *inputs,
 
   std::vector<int> image_batch_shape{batch_size, 3, 224, 224};
   std::vector<int> label_batch_shape{batch_size, 1};
+  auto images_offset_in_file = static_cast<size_t>(file.tellg());
   auto labels_offset_in_file =
-      static_cast<size_t>(file.tellg()) +
-      sizeof(float) * total_images *
-          std::accumulate(image_batch_shape.begin() + 1,
-                          image_batch_shape.end(), 1, std::multiplies<int>());
+      images_offset_in_file + sizeof(float) * total_images * 3 * 224 * 224;
 
-  TensorReader<float> image_reader(file, 0, image_batch_shape, "input");
+  TensorReader<float> image_reader(file, images_offset_in_file,
+                                   image_batch_shape, "input");
   TensorReader<int64_t> label_reader(file, labels_offset_in_file,
                                      label_batch_shape, "label");
 
-  auto iterations = total_images / batch_size;
-  if (FLAGS_iterations > 0 && FLAGS_iterations < iterations)
-    iterations = FLAGS_iterations;
-  for (auto i = 0; i < iterations; i++) {
+  auto iterations_max = total_images / batch_size;
+  for (auto i = 0; i < iterations_max; i++) {
     auto images = image_reader.NextBatch();
     auto labels = label_reader.NextBatch();
     inputs->emplace_back(

@@ -148,20 +152,21 @@ TEST(Analyzer_int8_resnet50, quantization) {
   AnalysisConfig q_cfg;
   SetConfig(&q_cfg);
 
+  // read data from file and prepare batches with test data
   std::vector<std::vector<PaddleTensor>> input_slots_all;
-  SetInput(&input_slots_all, 100);
+  SetInput(&input_slots_all);
 
+  // prepare warmup batch from input data read earlier
+  // warmup batch size can be different than batch size
   std::shared_ptr<std::vector<PaddleTensor>> warmup_data =
-      GetWarmupData(input_slots_all, 100);
+      GetWarmupData(input_slots_all);
 
+  // configure quantizer
   q_cfg.EnableMkldnnQuantizer();
   q_cfg.mkldnn_quantizer_config()->SetWarmupData(warmup_data);
-  q_cfg.mkldnn_quantizer_config()->SetWarmupBatchSize(100);
+  q_cfg.mkldnn_quantizer_config()->SetWarmupBatchSize(FLAGS_warmup_batch_size);
 
-  CompareQuantizedAndAnalysis(
-      reinterpret_cast<const PaddlePredictor::Config *>(&cfg),
-      reinterpret_cast<const PaddlePredictor::Config *>(&q_cfg),
-      input_slots_all);
+  CompareQuantizedAndAnalysis(&cfg, &q_cfg, input_slots_all);
 }
 
 } // namespace analysis
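Note: SetInput now reads images from the recorded header offset and places labels right after the full image block, so labels_offset = images_offset + sizeof(float) * total_images * 3 * 224 * 224. A worked example of that offset arithmetic (the header size below is an assumption; in the tester, images_offset_in_file is whatever file.tellg() returns after the header is consumed):

#include <cstddef>
#include <iostream>

int main() {
  std::size_t total_images = 100;  // illustrative count
  std::size_t images_offset = 8;   // assumed header size, in bytes
  std::size_t image_bytes =
      sizeof(float) * total_images * 3 * 224 * 224;  // 60,211,200 bytes
  std::size_t labels_offset = images_offset + image_bytes;
  std::cout << "labels start at byte " << labels_offset << "\n";
}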
