Skip to content

Commit 53c6890

Browse files
committed
test_analyzer_int8 tests use default pass order
test=release/1.4
1 parent ab6600e commit 53c6890

File tree

3 files changed

+17
-22
lines changed

3 files changed

+17
-22
lines changed

paddle/fluid/inference/api/paddle_pass_builder.cc

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -86,7 +86,8 @@ const std::vector<std::string> kAnakinSubgraphPasses({
8686

8787
GpuPassStrategy::GpuPassStrategy() : PassStrategy({}) {
8888
passes_.assign({
89-
"infer_clean_graph_pass", //
89+
"infer_clean_graph_pass", //
90+
"runtime_context_cache_pass", //
9091
// "identity_scale_op_clean_pass", //
9192
"conv_affine_channel_fuse_pass", //
9293
"conv_eltwiseadd_affine_channel_fuse_pass", //
@@ -96,7 +97,6 @@ GpuPassStrategy::GpuPassStrategy() : PassStrategy({}) {
9697
"conv_elementwise_add_act_fuse_pass", //
9798
"conv_elementwise_add2_act_fuse_pass", //
9899
"conv_elementwise_add_fuse_pass", //
99-
"runtime_context_cache_pass", //
100100
#endif //
101101
"transpose_flatten_concat_fuse_pass",
102102
});
@@ -116,7 +116,11 @@ CpuPassStrategy::CpuPassStrategy() : PassStrategy({}) {
116116
// NOTE the large fusions should be located in the front, so that they will
117117
// not be damaged by smaller ones.
118118
passes_.assign({
119-
"infer_clean_graph_pass", //
119+
"infer_clean_graph_pass", //
120+
// TODO(luotao): runtime_context_cache_pass should be located in the
121+
// front, see https://github.com/PaddlePaddle/Paddle/issues/16609,
122+
// will enhance this pass later.
123+
"runtime_context_cache_pass", //
120124
"attention_lstm_fuse_pass", //
121125
"seqpool_concat_fuse_pass", //
122126
"seqconv_eltadd_relu_fuse_pass", //
@@ -132,8 +136,6 @@ CpuPassStrategy::CpuPassStrategy() : PassStrategy({}) {
132136
"conv_bn_fuse_pass", //
133137
"conv_eltwiseadd_bn_fuse_pass", //
134138
"is_test_pass", //
135-
"identity_scale_op_clean_pass", //
136-
"runtime_context_cache_pass", //
137139
});
138140
use_gpu_ = false;
139141
}

paddle/fluid/inference/tests/api/analyzer_int8_image_classification_tester.cc

Lines changed: 4 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -23,18 +23,11 @@ namespace analysis {
2323

2424
void SetConfig(AnalysisConfig *cfg) {
2525
cfg->SetModel(FLAGS_infer_model);
26-
cfg->SetProgFile("__model__");
2726
cfg->DisableGpu();
2827
cfg->SwitchIrOptim();
29-
cfg->SwitchSpecifyInputNames(false);
28+
cfg->SwitchSpecifyInputNames();
3029
cfg->SetCpuMathLibraryNumThreads(FLAGS_paddle_num_threads);
3130
cfg->EnableMKLDNN();
32-
cfg->pass_builder()->SetPasses(
33-
{"infer_clean_graph_pass", "mkldnn_placement_pass",
34-
"depthwise_conv_mkldnn_pass", "conv_bn_fuse_pass",
35-
"conv_eltwiseadd_bn_fuse_pass", "conv_bias_mkldnn_fuse_pass",
36-
"conv_elementwise_add_mkldnn_fuse_pass", "conv_relu_mkldnn_fuse_pass",
37-
"fc_fuse_pass", "is_test_pass"});
3831
}
3932

4033
template <typename T>
@@ -84,13 +77,13 @@ std::shared_ptr<std::vector<PaddleTensor>> GetWarmupData(
8477
std::to_string(num_images) + " is bigger than all test data size.");
8578

8679
PaddleTensor images;
87-
images.name = "input";
80+
images.name = "image";
8881
images.shape = {num_images, 3, 224, 224};
8982
images.dtype = PaddleDType::FLOAT32;
9083
images.data.Resize(sizeof(float) * num_images * 3 * 224 * 224);
9184

9285
PaddleTensor labels;
93-
labels.name = "labels";
86+
labels.name = "label";
9487
labels.shape = {num_images, 1};
9588
labels.dtype = PaddleDType::INT64;
9689
labels.data.Resize(sizeof(int64_t) * num_images);
@@ -132,7 +125,7 @@ void SetInput(std::vector<std::vector<PaddleTensor>> *inputs,
132125
images_offset_in_file + sizeof(float) * total_images * 3 * 224 * 224;
133126

134127
TensorReader<float> image_reader(file, images_offset_in_file,
135-
image_batch_shape, "input");
128+
image_batch_shape, "image");
136129
TensorReader<int64_t> label_reader(file, labels_offset_in_file,
137130
label_batch_shape, "label");
138131

paddle/fluid/inference/tests/api/tester_helper.h

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -316,7 +316,8 @@ void PredictionRun(PaddlePredictor *predictor,
316316
int num_threads, int tid) {
317317
int num_times = FLAGS_repeat;
318318
int iterations = inputs.size(); // process the whole dataset ...
319-
if (FLAGS_iterations > 0 && FLAGS_iterations < inputs.size())
319+
if (FLAGS_iterations > 0 &&
320+
FLAGS_iterations < static_cast<int64_t>(inputs.size()))
320321
iterations =
321322
FLAGS_iterations; // ... unless the number of iterations is set
322323
outputs->resize(iterations);
@@ -329,14 +330,14 @@ void PredictionRun(PaddlePredictor *predictor,
329330
#endif
330331
if (!FLAGS_zero_copy) {
331332
run_timer.tic();
332-
for (size_t i = 0; i < iterations; i++) {
333+
for (int i = 0; i < iterations; i++) {
333334
for (int j = 0; j < num_times; j++) {
334335
predictor->Run(inputs[i], &(*outputs)[i], FLAGS_batch_size);
335336
}
336337
}
337338
elapsed_time = run_timer.toc();
338339
} else {
339-
for (size_t i = 0; i < iterations; i++) {
340+
for (int i = 0; i < iterations; i++) {
340341
ConvertPaddleTensorToZeroCopyTensor(predictor, inputs[i]);
341342
run_timer.tic();
342343
for (int j = 0; j < num_times; j++) {
@@ -366,9 +367,8 @@ void TestOneThreadPrediction(
366367
const std::vector<std::vector<PaddleTensor>> &inputs,
367368
std::vector<std::vector<PaddleTensor>> *outputs, bool use_analysis = true) {
368369
auto predictor = CreateTestPredictor(config, use_analysis);
369-
PredictionWarmUp(predictor.get(), inputs, outputs, FLAGS_paddle_num_threads,
370-
0);
371-
PredictionRun(predictor.get(), inputs, outputs, FLAGS_paddle_num_threads, 0);
370+
PredictionWarmUp(predictor.get(), inputs, outputs, 1, 0);
371+
PredictionRun(predictor.get(), inputs, outputs, 1, 0);
372372
}
373373

374374
void TestMultiThreadPrediction(

0 commit comments

Comments (0)