Skip to content

Commit 90780e2 — Revert "MKLDNN layout: Support for sum operator"
1 parent 0151e4e · commit 90780e2

File tree

13 files changed: +102 additions, −411 deletions

paddle/fluid/operators/parallel_do_op.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -295,7 +295,7 @@ class ParallelDoGradOp : public framework::OperatorBase {
295295

296296
auto sum_op = framework::OpRegistry::CreateOp(
297297
"sum", {{"X", {s, tmp_name}}}, {{"Out", {s}}},
298-
framework::AttributeMap{{"use_mkldnn", {false}}});
298+
framework::AttributeMap{});
299299
VLOG(10) << sum_op->DebugStringEx(sub_scopes[0]);
300300
sum_op->Run(*sub_scopes[0], places[0]);
301301
WaitOnPlace(places[0]);

paddle/fluid/operators/recurrent_op.cc

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -429,8 +429,7 @@ class RecurrentGradOp : public RecurrentBase {
429429

430430
auto sum_op = framework::OpRegistry::CreateOp(
431431
"sum", {{"X", {pg_names[param_id], new_inside_name}}},
432-
{{"Out", {pg_names[param_id]}}},
433-
framework::AttributeMap{{"use_mkldnn", {false}}});
432+
{{"Out", {pg_names[param_id]}}}, framework::AttributeMap{});
434433
sum_op->Run(cur_scope, place);
435434

436435
cur_scope.Rename(new_inside_name, inside_grad_name);

paddle/fluid/operators/sum_mkldnn_op.cc

Lines changed: 0 additions & 240 deletions
This file was deleted.

paddle/fluid/operators/sum_op.cc

Lines changed: 6 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -18,10 +18,6 @@ limitations under the License. */
1818
#include "paddle/fluid/framework/var_type_inference.h"
1919
#include "paddle/fluid/operators/detail/safe_ref.h"
2020

21-
#ifdef PADDLE_WITH_MKLDNN
22-
#include "paddle/fluid/platform/mkldnn_helper.h"
23-
#endif
24-
2521
namespace paddle {
2622
namespace operators {
2723
using framework::Tensor;
@@ -67,18 +63,6 @@ class SumOp : public framework::OperatorWithKernel {
6763
framework::OpKernelType GetExpectedKernelType(
6864
const framework::ExecutionContext& ctx) const override {
6965
auto x_vars = ctx.MultiInputVar("X");
70-
71-
framework::LibraryType library{framework::LibraryType::kPlain};
72-
framework::DataLayout layout{framework::DataLayout::kAnyLayout};
73-
74-
#ifdef PADDLE_WITH_MKLDNN
75-
if (library == framework::LibraryType::kPlain &&
76-
platform::CanMKLDNNBeUsed(ctx)) {
77-
library = framework::LibraryType::kMKLDNN;
78-
layout = framework::DataLayout::kMKLDNN;
79-
}
80-
#endif
81-
8266
if (x_vars[0]->IsType<framework::LoDTensor>()) {
8367
int dtype = -1;
8468
for (auto& x_var : x_vars) {
@@ -96,27 +80,26 @@ class SumOp : public framework::OperatorWithKernel {
9680
"Sum operator should have at least one tensor");
9781

9882
return framework::OpKernelType(
99-
static_cast<framework::proto::VarType::Type>(dtype), ctx.GetPlace(),
100-
layout, library);
83+
static_cast<framework::proto::VarType::Type>(dtype),
84+
ctx.device_context());
10185
} else if (x_vars[0]->IsType<framework::SelectedRows>()) {
10286
for (auto& var : x_vars) {
10387
auto& value = var->Get<framework::SelectedRows>().value();
10488
if (value.IsInitialized()) {
10589
return framework::OpKernelType(framework::ToDataType(value.type()),
106-
ctx.device_context(), layout, library);
90+
ctx.device_context());
10791
}
10892
}
10993
// if input sparse vars are not initialized, use an default kernel type.
11094
return framework::OpKernelType(framework::proto::VarType::FP32,
111-
ctx.device_context(), layout, library);
95+
ctx.device_context());
11296
} else if (x_vars[0]->IsType<framework::LoDTensorArray>()) {
11397
for (auto& x_var : x_vars) {
11498
auto& array = x_var->Get<framework::LoDTensorArray>();
11599
for (auto& each : array) {
116100
if (each.numel() != 0) {
117101
return framework::OpKernelType(framework::ToDataType(each.type()),
118-
ctx.device_context(), layout,
119-
library);
102+
ctx.device_context());
120103
}
121104
}
122105
}
@@ -133,9 +116,6 @@ class SumOpMaker : public framework::OpProtoAndCheckerMaker {
133116
AddInput("X", "(vector<Tensor>) The input tensors of sum operator.")
134117
.AsDuplicable();
135118
AddOutput("Out", "(Tensor) The output tensor of sum operator.").Reuse("X");
136-
AddAttr<bool>("use_mkldnn",
137-
"(bool, default false) Only used in mkldnn kernel")
138-
.SetDefault(false);
139119
AddComment(R"DOC(
140120
Sum operator.
141121
@@ -152,6 +132,7 @@ class SumOpVarTypeInference : public framework::VarTypeInference {
152132
framework::BlockDesc* block) const override {
153133
auto& inputs = op_desc.Input("X");
154134
auto var_type = framework::proto::VarType::SELECTED_ROWS;
135+
155136
for (auto& name : op_desc.Input("X")) {
156137
VLOG(10) << name << " "
157138
<< block->FindRecursiveOrCreateVar(name).GetType();
@@ -225,7 +206,6 @@ namespace ops = paddle::operators;
225206

226207
REGISTER_OPERATOR(sum, ops::SumOp, ops::SumOpMaker, ops::SumGradMaker,
227208
ops::SumOpVarTypeInference);
228-
229209
REGISTER_OP_CPU_KERNEL(
230210
sum, ops::SumKernel<paddle::platform::CPUDeviceContext, float>,
231211
ops::SumKernel<paddle::platform::CPUDeviceContext, double>,

paddle/fluid/operators/while_op.cc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -203,11 +203,11 @@ class WhileGradOp : public framework::OperatorBase {
203203
->set_lod(inside_tensor.lod());
204204
}
205205
}
206+
206207
auto new_inside_name = cur_scope.Rename(inside_grad_name);
207208
auto sum_op = framework::OpRegistry::CreateOp(
208209
"sum", {{"X", {pg_names[param_id], new_inside_name}}},
209-
{{"Out", {pg_names[param_id]}}},
210-
framework::AttributeMap{{"use_mkldnn", {false}}});
210+
{{"Out", {pg_names[param_id]}}}, framework::AttributeMap{});
211211
sum_op->Run(cur_scope, dev_place);
212212
cur_scope.Rename(new_inside_name, inside_grad_name);
213213
}

paddle/fluid/platform/mkldnn_helper.h

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -99,11 +99,5 @@ inline mkldnn::memory::format GetMKLDNNFormat(const mkldnn::memory memory) {
9999
memory.get_primitive_desc().desc().data.format);
100100
}
101101

102-
inline mkldnn::memory::format GetMKLDNNFormat(
103-
const mkldnn::sum::primitive_desc& memory) {
104-
return static_cast<mkldnn::memory::format>(
105-
memory.dst_primitive_desc().desc().data.format);
106-
}
107-
108102
} // namespace platform
109103
} // namespace paddle

python/paddle/fluid/backward.py

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -132,9 +132,9 @@ def _addup_repetitive_outputs_(op_descs):
132132
for idx, op_desc in enumerate(op_descs):
133133
for var_name in op_desc.input_arg_names():
134134
if len(renamed_vars[var_name]) > 1:
135-
pending_sum_ops.append((_create_op_desc_(
136-
"sum", {"X": renamed_vars[var_name]}, {"Out": [var_name]},
137-
{"use_mkldnn": False}), idx))
135+
pending_sum_ops.append(
136+
(_create_op_desc_("sum", {"X": renamed_vars[var_name]},
137+
{"Out": [var_name]}, {}), idx))
138138
renamed_vars[var_name] = [var_name]
139139
for var_name in op_desc.output_arg_names():
140140
if var_name == core.empty_var_name(
@@ -161,9 +161,8 @@ def _addup_repetitive_outputs_(op_descs):
161161
renamed_vars[var_name].append(new_name)
162162
for var_name, inputs in renamed_vars.iteritems():
163163
if len(inputs) > 1:
164-
pending_sum_ops.append(
165-
(_create_op_desc_("sum", {"X": inputs}, {"Out": [var_name]},
166-
{"use_mkldnn": False}), len(op_descs)))
164+
pending_sum_ops.append((_create_op_desc_(
165+
"sum", {"X": inputs}, {"Out": [var_name]}, {}), len(op_descs)))
167166
# sum_op descs are sorted according to their insert position
168167
for p in reversed(pending_sum_ops):
169168
op_descs.insert(p[1], p[0])

0 commit comments