Skip to content

Commit b9d9ef7

Browse files
authored
paddle/fluid/ modify MKLDNN [fluid_ops] - part (#74536)
1 parent 298c9d4 commit b9d9ef7

10 files changed

+20
-20
lines changed

paddle/fluid/framework/executor.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -607,7 +607,7 @@ void Executor::EnableONEDNN(const ProgramDesc& program) {
607607
}
608608
#else
609609
LOG(WARNING)
610-
<< "'MKLDNN' is not supported, Please re-compile with WITH_ONEDNN option";
610+
<< "'ONEDNN' is not supported, Please re-compile with WITH_ONEDNN option";
611611
#endif
612612
}
613613
} // namespace paddle::framework

paddle/fluid/framework/ir/conv_bn_fuse_pass.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ namespace framework {
2323
namespace ir {
2424

2525
/*
26-
* Fuse the Conv and BatchNorm to a ConvBNMKLDNNOp.
26+
* Fuse the Conv and BatchNorm to a ConvBNONEDNNOp.
2727
*/
2828

2929
class ConvBNFusePass : public FusePassBase {

paddle/fluid/framework/ir/graph_pattern_detector.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3281,7 +3281,7 @@ PDNode *patterns::Bloat16Ops::operator()() {
32813281
return op;
32823282
}
32833283

3284-
PDNode *patterns::MKLDNNInPlace::operator()() {
3284+
PDNode *patterns::ONEDNNInPlace::operator()() {
32853285
const std::unordered_set<std::string> &supported_op_types = {
32863286
"abs", "gelu", "leaky_relu", "relu", "softmax", "sqrt", "swish", "tanh"};
32873287

paddle/fluid/framework/ir/graph_pattern_detector.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1771,8 +1771,8 @@ struct Bloat16Ops : public PatternBase {
17711771

17721772
// Pattern used for enforcing inplace computation for in-place computation
17731773
// supporting DNNL ops. softmax, batch_norm and layer_norm
1774-
struct MKLDNNInPlace : public PatternBase {
1775-
MKLDNNInPlace(PDPattern* pattern, const std::string& name_scope)
1774+
struct ONEDNNInPlace : public PatternBase {
1775+
ONEDNNInPlace(PDPattern* pattern, const std::string& name_scope)
17761776
: PatternBase(pattern, name_scope, "mkldnn_inplace") {}
17771777
PDNode* operator()();
17781778

paddle/fluid/framework/ir/onednn/matmul_elementwise_add_onednn_fuse_pass.cc

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ namespace paddle::framework::ir {
2323

2424
using string::PrettyLogDetail;
2525

26-
void MatmulElementwiseAddMKLDNNFusePass::ApplyImpl(Graph* graph) const {
26+
void MatmulElementwiseAddONEDNNFusePass::ApplyImpl(Graph* graph) const {
2727
auto matmul_types = {"fused_matmul", "matmul", "matmul_v2"};
2828
auto matmul_as_x = {true, false};
2929

@@ -33,7 +33,7 @@ void MatmulElementwiseAddMKLDNNFusePass::ApplyImpl(Graph* graph) const {
3333
}
3434
}
3535

36-
void MatmulElementwiseAddMKLDNNFusePass::FuseMatmulElementwiseAdd(
36+
void MatmulElementwiseAddONEDNNFusePass::FuseMatmulElementwiseAdd(
3737
Graph* graph, const std::string& matmul_type, bool matmul_as_x) const {
3838
const std::string fusion_mode = matmul_as_x ? "x" : "y";
3939
const auto name_scope = matmul_type + "_elementwise_add_as_" + fusion_mode;
@@ -87,7 +87,7 @@ void MatmulElementwiseAddMKLDNNFusePass::FuseMatmulElementwiseAdd(
8787
}
8888
}
8989

90-
MatmulElementwiseAddMKLDNNFusePass::MatmulElementwiseAddMKLDNNFusePass() {
90+
MatmulElementwiseAddONEDNNFusePass::MatmulElementwiseAddONEDNNFusePass() {
9191
AddOpCompat(OpCompat("matmul"))
9292
.AddInput("X")
9393
.IsTensor()
@@ -164,7 +164,7 @@ MatmulElementwiseAddMKLDNNFusePass::MatmulElementwiseAddMKLDNNFusePass() {
164164
} // namespace paddle::framework::ir
165165

166166
REGISTER_PASS(matmul_elementwise_add_onednn_fuse_pass,
167-
paddle::framework::ir::MatmulElementwiseAddMKLDNNFusePass);
167+
paddle::framework::ir::MatmulElementwiseAddONEDNNFusePass);
168168
REGISTER_PASS_CAPABILITY(matmul_elementwise_add_onednn_fuse_pass)
169169
.AddCombination(
170170
paddle::framework::compatible::OpVersionComparatorCombination()

paddle/fluid/framework/ir/onednn/matmul_elementwise_add_onednn_fuse_pass.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -21,10 +21,10 @@ namespace paddle {
2121
namespace framework {
2222
namespace ir {
2323

24-
class MatmulElementwiseAddMKLDNNFusePass : public FusePassBase {
24+
class MatmulElementwiseAddONEDNNFusePass : public FusePassBase {
2525
public:
26-
MatmulElementwiseAddMKLDNNFusePass();
27-
virtual ~MatmulElementwiseAddMKLDNNFusePass() {}
26+
MatmulElementwiseAddONEDNNFusePass();
27+
virtual ~MatmulElementwiseAddONEDNNFusePass() {}
2828

2929
protected:
3030
void ApplyImpl(Graph* graph) const;

paddle/fluid/framework/op_kernel_type.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -107,7 +107,7 @@ inline bool NeedTransformLayout(const DataLayout& l, const DataLayout& r) {
107107
bool ret =
108108
(l != DataLayout::kAnyLayout && r != DataLayout::kAnyLayout && l != r);
109109
#ifdef PADDLE_WITH_DNNL
110-
// Layout transform needed for either non-MKLDNN to OneDNN or vice versa
110+
// Layout transform needed for either non-ONEDNN to OneDNN or vice versa
111111
ret |= (l != DataLayout::ONEDNN && r == DataLayout::ONEDNN);
112112
ret |= (l == DataLayout::ONEDNN && r != DataLayout::ONEDNN);
113113
#endif

paddle/fluid/pir/transforms/onednn/cpu_bfloat16_type_placement_pass.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -135,7 +135,7 @@ class CpuBfloat16TypePattern : public pir::RewritePattern {
135135
} else {
136136
PADDLE_THROW(common::errors::Unimplemented(
137137
"result type is not DenseTensorType or VectorType, please close "
138-
"MKLDNNBf16"));
138+
"ONEDNNBf16"));
139139
}
140140
}
141141
}

paddle/fluid/pir/transforms/onednn/depthwise_conv_onednn_pass.cc

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -80,9 +80,9 @@ class DepthwiseConvPattern : public paddle::drr::DrrPatternBase {
8080
}
8181
};
8282

83-
class DepthwiseConvMKLDNNPass : public pir::PatternRewritePass {
83+
class DepthwiseConvONEDNNPass : public pir::PatternRewritePass {
8484
public:
85-
DepthwiseConvMKLDNNPass()
85+
DepthwiseConvONEDNNPass()
8686
: pir::PatternRewritePass("depthwise_conv_onednn_pass", 2) {}
8787

8888
pir::RewritePatternSet InitializePatterns(pir::IrContext *context) override {
@@ -97,11 +97,11 @@ class DepthwiseConvMKLDNNPass : public pir::PatternRewritePass {
9797

9898
namespace pir {
9999

100-
std::unique_ptr<Pass> CreateDepthwiseConvMKLDNNPass() {
100+
std::unique_ptr<Pass> CreateDepthwiseConvONEDNNPass() {
101101
// pd_op.depthwise_conv -> pd_op.conv2d
102-
return std::make_unique<DepthwiseConvMKLDNNPass>();
102+
return std::make_unique<DepthwiseConvONEDNNPass>();
103103
}
104104

105105
} // namespace pir
106106

107-
REGISTER_IR_PASS(depthwise_conv_onednn_pass, DepthwiseConvMKLDNNPass);
107+
REGISTER_IR_PASS(depthwise_conv_onednn_pass, DepthwiseConvONEDNNPass);

paddle/fluid/pir/transforms/onednn/depthwise_conv_onednn_pass.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,6 @@ namespace pir {
2121

2222
class Pass;
2323

24-
IR_API std::unique_ptr<Pass> CreateDepthwiseConvMKLDNNPass();
24+
IR_API std::unique_ptr<Pass> CreateDepthwiseConvONEDNNPass();
2525

2626
} // namespace pir

0 commit comments

Comments (0)