Skip to content

Commit a60957f

Browse files
committed
add test_analyzer_mobilenet
1 parent 3d5a990 commit a60957f

File tree

3 files changed

+120
-2
lines changed

3 files changed

+120
-2
lines changed

paddle/fluid/inference/analysis/analyzer.h

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -66,7 +66,10 @@ class Analyzer : public OrderedRegistry<PassManager> {
6666
// merged in a larger fuse op. The small fusion will not break the pattern of
6767
// larger fusion.
6868
const std::vector<std::string> all_ir_passes_{{
69-
// Manual update the passes here.
69+
// Manual update the passes here.
70+
#ifdef PADDLE_WITH_MKLDNN
71+
"depthwise_conv_mkldnn_pass", //
72+
#endif
7073
"attention_lstm_fuse_pass", //
7174
"seqconv_eltadd_relu_fuse_pass", //
7275
"embedding_fc_lstm_fuse_pass", //
@@ -79,7 +82,6 @@ class Analyzer : public OrderedRegistry<PassManager> {
7982
"conv_bn_fuse_pass", //
8083
"conv_eltwiseadd_bn_fuse_pass", //
8184
#ifdef PADDLE_WITH_MKLDNN
82-
"depthwise_conv_mkldnn_pass", //
8385
"conv_bias_mkldnn_fuse_pass", //
8486
"conv_relu_mkldnn_fuse_pass", //
8587
"conv_elementwise_add_mkldnn_fuse_pass", //

paddle/fluid/inference/tests/api/CMakeLists.txt

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -82,6 +82,14 @@ inference_analysis_api_test(test_analyzer_ocr ${OCR_INSTALL_DIR} analyzer_vis_te
8282
inference_analysis_api_test_with_fake_data(test_analyzer_resnet50
8383
"${INFERENCE_DEMO_INSTALL_DIR}/resnet50" analyzer_resnet50_tester.cc "resnet50_model.tar.gz")
8484

85+
# mobilenet
# Download the mobilenet model once (reused from the TensorRT test bucket —
# NOTE(review): presumably the same archive layout; verify the extracted
# directory is ${MOBILENET_INSTALL_DIR}/mobilenet as the ARGS below expect),
# then register the analyzer test against it.
set(MOBILENET_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/mobilenet")
if (NOT EXISTS ${MOBILENET_INSTALL_DIR})
# Only fetch when the directory is absent, so repeated builds stay offline.
inference_download_and_uncompress(${MOBILENET_INSTALL_DIR} "http://paddle-inference-dist.bj.bcebos.com/tensorrt_test" "mobilenet.tar.gz")
endif()
inference_analysis_test(test_analyzer_mobilenet SRCS analyzer_mobilenet_tester.cc
EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} ARGS --infer_model=${MOBILENET_INSTALL_DIR}/mobilenet)
92+
8593
# anakin
8694
if (WITH_ANAKIN AND WITH_MKL) # only needed in CI
8795
# anakin rnn1
Lines changed: 108 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,108 @@
1+
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
2+
3+
Licensed under the Apache License, Version 2.0 (the "License");
4+
you may not use this file except in compliance with the License.
5+
You may obtain a copy of the License at
6+
7+
http://www.apache.org/licenses/LICENSE-2.0
8+
9+
Unless required by applicable law or agreed to in writing, software
10+
distributed under the License is distributed on an "AS IS" BASIS,
11+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
See the License for the specific language governing permissions and
13+
limitations under the License. */
14+
15+
#include <fstream>
#include <functional>
#include <iostream>
#include <numeric>

#include "paddle/fluid/inference/tests/api/tester_helper.h"
18+
19+
namespace paddle {
20+
namespace inference {
21+
namespace analysis {
22+
23+
// Build the analysis config shared by every mobilenet test in this file:
// CPU-only execution with the IR optimization pipeline enabled and inputs
// fed by name.
void SetConfig(AnalysisConfig *cfg) {
  cfg->model_dir = FLAGS_infer_model;
  // CPU path only; the device id is unused for CPU but set for completeness.
  cfg->use_gpu = false;
  cfg->device = 0;
  // Inputs are matched to feed targets by name rather than by position.
  cfg->specify_input_name = true;
  // Enable IR passes so fuse statistics (checked below) are produced.
  cfg->enable_ir_optim = true;
}
30+
31+
void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
32+
PADDLE_ENFORCE_EQ(FLAGS_test_all_data, 0, "Only have single batch of data.");
33+
34+
PaddleTensor input;
35+
// channel=3, height/width=318
36+
std::vector<int> shape({FLAGS_batch_size, 3, 318, 318});
37+
input.shape = shape;
38+
input.dtype = PaddleDType::FLOAT32;
39+
40+
// fill input data, for profile easily, do not use random data here.
41+
size_t size = FLAGS_batch_size * 3 * 318 * 318;
42+
input.data.Resize(size * sizeof(float));
43+
float *input_data = static_cast<float *>(input.data.data());
44+
for (size_t i = 0; i < size; i++) {
45+
*(input_data + i) = static_cast<float>(i) / size;
46+
}
47+
48+
std::vector<PaddleTensor> input_slots;
49+
input_slots.assign({input});
50+
(*inputs).emplace_back(input_slots);
51+
}
52+
53+
// Easy for profiling independently.
54+
void profile(bool use_mkldnn = false) {
55+
AnalysisConfig cfg;
56+
SetConfig(&cfg);
57+
cfg._use_mkldnn = use_mkldnn;
58+
std::vector<PaddleTensor> outputs;
59+
60+
std::vector<std::vector<PaddleTensor>> input_slots_all;
61+
SetInput(&input_slots_all);
62+
TestPrediction(cfg, input_slots_all, &outputs, FLAGS_num_threads);
63+
64+
if (FLAGS_num_threads == 1 && !FLAGS_test_all_data) {
65+
PADDLE_ENFORCE_EQ(outputs.size(), 1UL);
66+
size_t size = GetSize(outputs[0]);
67+
// output is a 1000-dimension feature
68+
EXPECT_EQ(size, 1000 * FLAGS_batch_size);
69+
}
70+
}
71+
72+
// Profile on the default (non-MKLDNN) CPU path.
TEST(Analyzer_mobilenet, profile) { profile(); }
#ifdef PADDLE_WITH_MKLDNN
// Same profiling flow with MKLDNN kernels enabled.
TEST(Analyzer_mobilenet, profile_mkldnn) { profile(true /* use_mkldnn */); }
#endif
76+
77+
// Check the depthwise_conv status: verify that depthwise_conv_mkldnn_pass
// actually fired on this model (mobilenet is built from depthwise
// convolutions; the expected count of 13 matches this model's topology —
// NOTE(review): confirm against the model definition).
// Guarded by PADDLE_WITH_MKLDNN: the pass is only registered in MKLDNN
// builds, so without the guard this test would fail elsewhere.
#ifdef PADDLE_WITH_MKLDNN
TEST(Analyzer_mobilenet, depthwise_conv_statis) {
  AnalysisConfig cfg;
  SetConfig(&cfg);
  cfg._use_mkldnn = true;
  int num_ops = 0;  // initialized defensively; filled by GetFuseStatis
  auto predictor = CreatePaddlePredictor<AnalysisConfig>(cfg);
  auto fuse_statis = GetFuseStatis(
      static_cast<AnalysisPredictor *>(predictor.get()), &num_ops);
  ASSERT_TRUE(fuse_statis.count("depthwise_conv_mkldnn_pass"));
  EXPECT_EQ(fuse_statis.at("depthwise_conv_mkldnn_pass"), 13);
}
#endif
89+
90+
// Compare result of NativeConfig and AnalysisConfig
91+
// Compare result of NativeConfig and AnalysisConfig: the IR-optimized
// analysis predictor must produce the same outputs as the plain native
// predictor on identical inputs.
void compare(bool use_mkldnn = false) {
  AnalysisConfig cfg;
  SetConfig(&cfg);
  cfg._use_mkldnn = use_mkldnn;

  std::vector<std::vector<PaddleTensor>> all_inputs;
  SetInput(&all_inputs);
  CompareNativeAndAnalysis(cfg, all_inputs);
}
100+
101+
// Cross-check analysis vs native execution on the default CPU path.
TEST(Analyzer_mobilenet, compare) { compare(); }
#ifdef PADDLE_WITH_MKLDNN
// Repeat the cross-check with MKLDNN kernels enabled.
TEST(Analyzer_mobilenet, compare_mkldnn) { compare(true /* use_mkldnn */); }
#endif
105+
106+
} // namespace analysis
107+
} // namespace inference
108+
} // namespace paddle

0 commit comments

Comments
 (0)