
Commit 627ca4a

Wojciech Uss authored and sfraczek committed
fix repeating passes (#16606)
1 parent fe240d9 commit 627ca4a

File tree: 3 files changed, 53 additions & 51 deletions


paddle/fluid/inference/api/analysis_config.cc

Lines changed: 3 additions & 10 deletions
@@ -141,7 +141,6 @@ AnalysisConfig::AnalysisConfig(const AnalysisConfig &other) {
 
 void AnalysisConfig::EnableMKLDNN() {
 #ifdef PADDLE_WITH_MKLDNN
-  pass_builder()->EnableMKLDNN();
   use_mkldnn_ = true;
 #else
   LOG(ERROR) << "Please compile with MKLDNN first to use MKLDNN";
@@ -234,16 +233,13 @@ void AnalysisConfig::Update() {
   }
 
   if (use_mkldnn_) {
+#ifdef PADDLE_WITH_MKLDNN
     if (!enable_ir_optim_) {
       LOG(ERROR)
           << "EnableMKLDNN() only works when IR optimization is enabled.";
+    } else {
+      pass_builder()->EnableMKLDNN();
     }
-#ifdef PADDLE_WITH_MKLDNN
-    pass_builder()->EnableMKLDNN();
-    use_mkldnn_ = true;
-#else
-    LOG(ERROR) << "Please compile with MKLDNN first to use MKLDNN";
-    use_mkldnn_ = false;
 #endif
   }
 
@@ -255,9 +251,6 @@ void AnalysisConfig::Update() {
   }
 #ifdef PADDLE_WITH_MKLDNN
   pass_builder()->EnableMkldnnQuantizer();
-#else
-  LOG(ERROR) << "Please compile with MKLDNN first to use MkldnnQuantizer";
-  use_mkldnn_quantizer_ = false;
 #endif
 }
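The net effect of this file's change: AnalysisConfig::EnableMKLDNN() now only records the request, and Update() hands pass registration to the pass builder only when IR optimization is enabled. A minimal self-contained sketch of that flow (MockConfig and MockPassStrategy are illustrative stand-ins, not Paddle's classes):

#include <iostream>
#include <string>
#include <vector>

// Illustrative stand-ins only; names and members are assumptions,
// not the actual Paddle classes.
struct MockPassStrategy {
  std::vector<std::string> passes;
  bool use_mkldnn = false;

  void EnableMKLDNN() {
    if (!use_mkldnn) {  // guard keeps the call idempotent
      passes.push_back("mkldnn_placement_pass");
      passes.push_back("conv_bias_mkldnn_fuse_pass");
    }
    use_mkldnn = true;
  }
};

struct MockConfig {
  MockPassStrategy strategy;
  bool use_mkldnn = false;
  bool enable_ir_optim = true;

  void EnableMKLDNN() { use_mkldnn = true; }  // flag only, no pass edits here

  void Update() {
    if (use_mkldnn && enable_ir_optim) {
      strategy.EnableMKLDNN();  // safe to call on every Update()
    }
  }
};

int main() {
  MockConfig cfg;
  cfg.EnableMKLDNN();
  cfg.Update();
  cfg.Update();  // a second Update() no longer duplicates the MKLDNN passes
  std::cout << "pass count: " << cfg.strategy.passes.size() << "\n";  // prints 2
  return 0;
}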

paddle/fluid/inference/api/paddle_pass_builder.cc

Lines changed: 43 additions & 7 deletions
@@ -64,10 +64,12 @@ void PaddlePassBuilder::DeletePass(size_t idx) {
   passes_.erase(std::begin(passes_) + idx);
 }
 
-void GpuPassStrategy::EnableMKLDNN() {
-  LOG(ERROR) << "GPU not support MKLDNN yet";
+void PaddlePassBuilder::AppendAnalysisPass(const std::string &pass) {
+  analysis_passes_.push_back(pass);
 }
 
+void PaddlePassBuilder::ClearPasses() { passes_.clear(); }
+
 // The following passes works for Anakin sub-graph engine.
 const std::vector<std::string> kAnakinSubgraphPasses({
     "infer_clean_graph_pass",  //
@@ -102,12 +104,12 @@ GpuPassStrategy::GpuPassStrategy() : PassStrategy({}) {
   use_gpu_ = true;
 }
 
-void GpuPassStrategy::EnableMkldnnQuantizer() {
-  LOG(ERROR) << "GPU not support MKL-DNN quantization";
+void GpuPassStrategy::EnableMKLDNN() {
+  LOG(ERROR) << "GPU not support MKLDNN yet";
 }
 
-void PaddlePassBuilder::AppendAnalysisPass(const std::string &pass) {
-  analysis_passes_.push_back(pass);
+void GpuPassStrategy::EnableMkldnnQuantizer() {
+  LOG(ERROR) << "GPU not support MKL-DNN quantization";
 }
 
 CpuPassStrategy::CpuPassStrategy() : PassStrategy({}) {
@@ -135,5 +137,39 @@ CpuPassStrategy::CpuPassStrategy() : PassStrategy({}) {
   });
   use_gpu_ = false;
 }
-void PaddlePassBuilder::ClearPasses() { passes_.clear(); }
+
+void CpuPassStrategy::EnableMKLDNN() {
+  // TODO(Superjomn) Consider the way to mix CPU with GPU.
+#ifdef PADDLE_WITH_MKLDNN
+  if (!use_mkldnn_) {
+    passes_.insert(passes_.begin(), "mkldnn_placement_pass");
+
+    for (auto &pass : std::vector<std::string>(
+             {"depthwise_conv_mkldnn_pass",    //
+              "conv_bn_fuse_pass",             // Execute BN passes again to
+              "conv_eltwiseadd_bn_fuse_pass",  // preserve correct pass order
+              "conv_bias_mkldnn_fuse_pass",    //
+              "conv3d_bias_mkldnn_fuse_pass",  //
+              "conv_elementwise_add_mkldnn_fuse_pass",
+              "conv_relu_mkldnn_fuse_pass"})) {
+      passes_.push_back(pass);
+    }
+  }
+  use_mkldnn_ = true;
+#else
+  use_mkldnn_ = false;
+#endif
+}
+
+void CpuPassStrategy::EnableMkldnnQuantizer() {
+#ifdef PADDLE_WITH_MKLDNN
+  if (!use_mkldnn_quantizer_) {
+    passes_.push_back("cpu_quantize_placement_pass");
+  }
+  use_mkldnn_quantizer_ = true;
+#else
+  use_mkldnn_quantizer_ = false;
+#endif
+}
+
 }  // namespace paddle
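The definitions moved into this .cc file guard on use_mkldnn_ and use_mkldnn_quantizer_, which is what stops the pass lists from being appended repeatedly. A small stand-alone sketch of that guard (TinyCpuStrategy is a hypothetical stand-in, not the real CpuPassStrategy):

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

// Hypothetical stand-in for the guarded EnableMKLDNN() above; illustration
// only, not part of the Paddle sources.
class TinyCpuStrategy {
 public:
  void EnableMKLDNN() {
    if (!use_mkldnn_) {  // the guard introduced by this commit
      passes_.insert(passes_.begin(), "mkldnn_placement_pass");
      passes_.push_back("conv_relu_mkldnn_fuse_pass");
    }
    use_mkldnn_ = true;
  }
  const std::vector<std::string> &passes() const { return passes_; }

 private:
  std::vector<std::string> passes_{"infer_clean_graph_pass"};
  bool use_mkldnn_{false};
};

int main() {
  TinyCpuStrategy s;
  s.EnableMKLDNN();
  s.EnableMKLDNN();  // second call is a no-op instead of repeating the passes
  assert(std::count(s.passes().begin(), s.passes().end(),
                    std::string("mkldnn_placement_pass")) == 1);
  return 0;
}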

paddle/fluid/inference/api/paddle_pass_builder.h

Lines changed: 7 additions & 34 deletions
@@ -109,43 +109,16 @@ class CpuPassStrategy : public PassStrategy {
   CpuPassStrategy();
 
   explicit CpuPassStrategy(const CpuPassStrategy &other)
-      : PassStrategy(other.AllPasses()) {}
+      : PassStrategy(other.AllPasses()) {
+    use_gpu_ = other.use_gpu_;
+    use_mkldnn_ = other.use_mkldnn_;
+    use_mkldnn_quantizer_ = other.use_mkldnn_quantizer_;
+  }
 
   virtual ~CpuPassStrategy() = default;
 
-  void EnableMKLDNN() override {
-    // TODO(Superjomn) Consider the way to mix CPU with GPU.
-#ifdef PADDLE_WITH_MKLDNN
-    if (!use_mkldnn_) {
-      passes_.insert(passes_.begin(), "mkldnn_placement_pass");
-
-      for (auto &pass : std::vector<std::string>(
-               {"depthwise_conv_mkldnn_pass",    //
-                "conv_bn_fuse_pass",             // Execute BN passes again to
-                "conv_eltwiseadd_bn_fuse_pass",  // preserve correct pass order
-                "conv_bias_mkldnn_fuse_pass",    //
-                "conv3d_bias_mkldnn_fuse_pass",  //
-                "conv_relu_mkldnn_fuse_pass",    //
-                "conv_elementwise_add_mkldnn_fuse_pass"})) {
-        passes_.push_back(pass);
-      }
-    }
-    use_mkldnn_ = true;
-#else
-    use_mkldnn_ = false;
-#endif
-  }
-
-  void EnableMkldnnQuantizer() override {
-#ifdef PADDLE_WITH_MKLDNN
-    if (!use_mkldnn_quantizer_) {
-      passes_.push_back("cpu_quantize_placement_pass");
-    }
-    use_mkldnn_quantizer_ = true;
-#else
-    use_mkldnn_quantizer_ = false;
-#endif
-  }
+  void EnableMKLDNN() override;
+  void EnableMkldnnQuantizer() override;
 
  protected:
   bool use_mkldnn_quantizer_{false};
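The header change also makes the copy constructor carry the flags along with the passes. A hypothetical sketch (SketchStrategy is not a Paddle class) of why that matters: without copying use_mkldnn_, a copy that already contains the MKLDNN passes via AllPasses() would pass the guard again and duplicate them.

#include <string>
#include <vector>

// Illustration only; names are assumptions, not the actual Paddle classes.
struct SketchStrategy {
  std::vector<std::string> passes;
  bool use_mkldnn = false;

  SketchStrategy() = default;
  SketchStrategy(const SketchStrategy &other)
      : passes(other.passes),
        use_mkldnn(other.use_mkldnn) {}  // mirrors the header change above

  void EnableMKLDNN() {
    if (!use_mkldnn) {
      passes.push_back("mkldnn_placement_pass");
    }
    use_mkldnn = true;
  }
};

int main() {
  SketchStrategy original;
  original.EnableMKLDNN();

  SketchStrategy copy(original);  // flags copied, so the guard still holds
  copy.EnableMKLDNN();            // no duplicate "mkldnn_placement_pass"
  return copy.passes.size() == 1 ? 0 : 1;
}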
