
Commit b9dbb7c (1 parent: 45d0259)

fix bias attri in mkldnn fc

2 files changed: +9 -10 lines

paddle/fluid/operators/fc_mkldnn_op.cc (7 additions, 3 deletions)

@@ -125,14 +125,16 @@ class FCMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
 
     auto input = ctx.Input<Tensor>("Input");
     auto w = ctx.Input<Tensor>("W");
+    auto bias = ctx.Input<Tensor>("Bias");
 
     PADDLE_ENFORCE(input->dims().size() == 2 || input->dims().size() == 4,
                    "Input must be with 2 or 4 dimensions, i.e. NCHW");
-    // TODO(intel): the src weight is io and mkldnn weight need be transposed !
+    // TODO(intel friends): the native weight format is io,
+    // but the mkldnn weight format is oihw, which may need be transposed.
     PADDLE_ENFORCE(w->dims().size() == 2 || w->dims().size() == 4,
                    "Weights must be with 2 or 4 dimensions, i.e. OI or OIHW");
 
-    bool with_bias = ctx.Attr<bool>("bias_attr");
+    bool with_bias = bias != nullptr;
     MKLDNNMD<Tensor> md(input, w, with_bias);
 
     std::shared_ptr<mkldnn::inner_product_forward::primitive_desc> pd =
@@ -155,6 +157,7 @@ class FCMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     auto dst_memory = mem.dst(output_data);
     auto src_memory = mem.src(input_data);
     auto weights_memory = mem.weights(w_data);
+    // TODO(intel friends): bias memory should also be obtain from bias->data()
     auto bias_memory = mem.bias();
 
     auto forward = with_bias ? mkldnn::inner_product_forward(
@@ -217,7 +220,8 @@ class FCMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
     const Tensor* out_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
     const T* out_grad_data = out_grad->data<T>();
 
-    bool with_bias = ctx.Attr<bool>("bias_attr");
+    auto bias = ctx.Input<Tensor>("Bias");
+    bool with_bias = bias != nullptr;
 
     MKLDNNMD<Tensor> md(input, w, with_bias);
     MKLDNNMemory mem(&md, mkldnn_engine);

python/paddle/fluid/tests/unittests/test_fc_mkldnn_op.py (2 additions, 7 deletions)

@@ -22,6 +22,7 @@ def fully_connected_naive(input, weights, bias_data=None):
     w_h, w_c = weights.shape
 
     x_data = np.reshape(input, [in_n, in_c * in_h * in_w])
+    # this transpose should be implemented at C code
     w_data = np.transpose(np.reshape(weights, (w_c, in_c * in_h * in_w)))
     result = None
 
@@ -43,15 +44,11 @@ class TestFCMKLDNNOp(OpTest):
     def setUp(self):
         self.op_type = "fc"
         self.use_mkldnn = True
-        self.with_bias = True
         self.matrix = MatrixGenerate(1, 10, 15, 3, 3)
 
         self.inputs = {'Input': self.matrix.input, 'W': self.matrix.weights}
 
-        self.attrs = {
-            'use_mkldnn': self.use_mkldnn,
-            'with_bias': self.with_bias
-        }
+        self.attrs = {'use_mkldnn': self.use_mkldnn, }
 
         self.outputs = {
             'Out': fully_connected_naive(self.matrix.input, self.matrix.weights)
@@ -85,13 +82,11 @@ def init_op_type(self):
 
 class TestFCMKLDNNOp4(TestFCMKLDNNOp):
     def init_op_type(self):
-        self.with_bias = False
         self.matrix = MatrixGenerate(2, 32, 48, 2, 2)
 
 
 class TestFCMKLDNNOp4(TestFCMKLDNNOp):
     def init_op_type(self):
-        self.with_bias = False
         self.matrix = MatrixGenerate(2, 32, 1000, 6, 6)
 
 
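Note: after this commit the bias path is driven purely by whether a `Bias` input is fed; the kernel checks `bias != nullptr`, so the Python test no longer sets a `with_bias` attribute. Below is a minimal, self-contained NumPy sketch of a naive fully-connected reference with the optional bias wired in, in the spirit of the test's `fully_connected_naive(input, weights, bias_data=None)`. The shape unpacking, the bias-addition branch, and the assumed `MatrixGenerate` argument order (batch, in_c, out_c, h, w) are illustrative assumptions, not the test file's exact code.

import numpy as np


def fully_connected_naive_sketch(input, weights, bias_data=None):
    # Assumed layout: input is 4-D NCHW, weights is 2-D with shape
    # (in_c * in_h * in_w, out_channels).
    in_n, in_c, in_h, in_w = input.shape
    w_h, w_c = weights.shape

    # Flatten each sample into a row vector.
    x_data = np.reshape(input, [in_n, in_c * in_h * in_w])
    # Same reshape + transpose the test applies before the matmul.
    w_data = np.transpose(np.reshape(weights, (w_c, in_c * in_h * in_w)))

    result = np.dot(x_data, w_data)  # shape: (in_n, w_c)
    if bias_data is not None:        # mirrors the kernel's `bias != nullptr` check
        result = result + bias_data  # bias broadcasts over the batch dimension
    return result


# Illustrative usage with random data shaped like MatrixGenerate(1, 10, 15, 3, 3)
# under the assumed argument order above:
x = np.random.random((1, 10, 3, 3)).astype("float32")
w = np.random.random((10 * 3 * 3, 15)).astype("float32")
b = np.random.random((15, )).astype("float32")
out_no_bias = fully_connected_naive_sketch(x, w)
out_with_bias = fully_connected_naive_sketch(x, w, b)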