
Commit fd0dd07

Merge pull request #13726 from jczaja/prv-fused_embedding_fc_lstm-ut
Unit test for Fused_embedding_fc_lstm op
2 parents ac8208b + 9f15d88 commit fd0dd07

File tree: 2 files changed (+220, -8 lines)

paddle/fluid/operators/fused_embedding_fc_lstm_op.cc

Lines changed: 2 additions & 8 deletions
@@ -93,11 +93,7 @@ void FusedEmbeddingFCLSTMOp::InferShape(
   ctx->SetOutputDim("Cell", out_dims);
   ctx->ShareLoD("Ids", "Hidden");
   ctx->ShareLoD("Ids", "Cell");
-  int xx_width;
-  if (ctx->Attrs().Get<bool>("use_seq")) {
-    xx_width = wh_dims[1];
-  } else {
-    xx_width = x_dims[1] > wh_dims[1] ? wh_dims[1] : x_dims[1];
+  if (!ctx->Attrs().Get<bool>("use_seq")) {
     PADDLE_ENFORCE(ctx->HasOutput("BatchedInput"),
                    "Assert only one Output(BatchedInput) of LSTM.");
     PADDLE_ENFORCE(ctx->HasOutput("BatchedHidden"),
@@ -112,7 +108,7 @@ void FusedEmbeddingFCLSTMOp::InferShape(
     ctx->SetOutputDim("BatchedHidden", out_dims);
     ctx->SetOutputDim("BatchedCell", out_dims);
   }
-  ctx->SetOutputDim("XX", {x_dims[0], xx_width});
+  ctx->SetOutputDim("XX", {x_dims[0], wh_dims[1]});
   ctx->ShareLoD("Ids", "XX");
 }

@@ -435,8 +431,6 @@ class FusedEmbeddingFCLSTMKernel : public framework::OpKernel<T> {
     INIT_VEC_FUNC
     INIT_BASE_INPUT_DATAS

-    // std::cout << "===> Batch Compute" << std::endl;
-
     auto* reordered_h0 = ctx.Output<Tensor>("ReorderedH0");
     auto* reordered_c0 = ctx.Output<Tensor>("ReorderedC0");
     auto* batched_input = ctx.Output<LoDTensor>("BatchedInput");
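
Taken together, the InferShape hunks mean the XX output (the result of the fused FC) now always has width wh_dims[1], i.e. 4 * D, in both the use_seq and batched paths; the old clamp against x_dims[1] no longer applies because the embedding table fed to the op is already pre-multiplied by the FC weight. A minimal numpy sketch of that shape relationship (T, M, D here are illustrative names, not part of the op's API):

import numpy as np

T, M, D = 14, 8, 16  # total time steps, embedding size, hidden size
x = np.random.rand(T, M).astype('float32')       # looked-up embeddings
wx = np.random.rand(M, 4 * D).astype('float32')  # FC weight, M x 4D

xx = np.dot(x, wx)  # the "XX" intermediate produced by the fused FC
# its width equals wh_dims[1] == 4 * D regardless of use_seq
assert xx.shape == (T, 4 * D)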
Lines changed: 218 additions & 0 deletions
@@ -0,0 +1,218 @@
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
from op_test import OpTest
from test_lstm_op import lstm, ACTIVATION


def fc(x, w, b):
    return np.dot(x, w) + b


def fused_embedded_fc_lstm(
        ids,  # T x 1
        lod,  # 1 x N
        embeddings=None,  # Dict_size x M
        wx=None,  # M x 4D
        bx=None,  # 1 x 4D
        h0=None,  # N x D
        c0=None,  # N x D
        w_h=None,  # D x 4D
        w_b=None,  # 1 x 4D
        w_c=None,  # 1 x 3D
        is_reverse=False,
        act_gate=None,
        act_cell=None,
        act_cand=None):
    # Look up the embeddings and pass the result into the LSTM reference
    T = ids.shape[0]
    M = embeddings.shape[1]
    x = embeddings[ids].reshape([T, M])
    return lstm(
        fc(x, wx, bx), lod, h0, c0, w_h, w_b, w_c, is_reverse, act_gate,
        act_cell, act_cand)


class TestFusionLSTMOp(OpTest):
    def set_conf(self):
        pass

    def setUp(self):
        self.op_type = 'fused_embedding_fc_lstm'
        self.lod = [[2, 3, 5, 4]]
        self.M = 8  # embedding size
        self.D = 16  # hidden size
        self.dict_size = 18
        self.has_initial_state = False
        self.use_peepholes = False
        self.is_reverse = False
        self.act_gate = 'sigmoid'
        self.act_cell = 'tanh'
        self.act_cand = 'tanh'
        self.set_conf()

        T = sum(self.lod[0])
        bs = len(self.lod[0])

        # weight of the FC
        wx = np.random.normal(size=(self.M, 4 * self.D)).astype('float32')
        # bias of the FC
        bx = np.random.normal(size=(1, 4 * self.D)).astype('float32')

        if self.use_peepholes:
            b = np.random.normal(size=(1, 7 * self.D)).astype('float32')
        else:
            b = np.random.normal(size=(1, 4 * self.D)).astype('float32')
        w_b = np.copy(b[:, 0:4 * self.D])
        w_c = b[:, 4 * self.D:] if self.use_peepholes else None

        # ids drawn from [0, dict_size - 1); np.random.randint's high is exclusive
        ids = np.random.randint(
            low=0, high=self.dict_size - 1, size=(T, 1)).astype("int64")
        # embeddings as if already trained, so each entry is of size M
        embeddings = np.random.random(
            (self.dict_size, self.M)).astype("float32")

        # multiply the embeddings by the FC weights
        fc_embeddings = np.dot(embeddings, wx)

        # the FC bias has to be folded manually into the bias of this
        # fused embedding FC LSTM
        b[0, 0:4 * self.D] += bx[0, :]
        combined_biases = b[:, 0:4 * self.D]
        # broadcast the combined biases so they can be added to every row
        ones = np.ones([self.dict_size, 1])
        broadcasted_biases = np.dot(ones, combined_biases)
        # sum the biases with embeddings * Wx
        fc_embeddings += broadcasted_biases

        if self.has_initial_state:
            h0 = np.random.normal(size=(bs, self.D)).astype('float32')
            c0 = np.random.normal(size=(bs, self.D)).astype('float32')
        else:
            h0 = np.zeros((bs, self.D)).astype('float32')
            c0 = np.zeros((bs, self.D)).astype('float32')

        wh = np.random.normal(size=(self.D, 4 * self.D)).astype('float32')

        h, c = fused_embedded_fc_lstm(
            ids, self.lod, embeddings, wx, bx, h0, c0, wh, w_b, w_c,
            self.is_reverse, ACTIVATION[self.act_gate],
            ACTIVATION[self.act_cell], ACTIVATION[self.act_cand])

        self.inputs = {
            'Ids': (ids, self.lod),
            'Embeddings': fc_embeddings,
            'WeightH': wh,
            'Bias': b
        }

        if self.has_initial_state:
            self.inputs['H0'] = h0
            self.inputs['C0'] = c0

        self.outputs = {
            'Hidden': (h, self.lod),
            'Cell': (c, self.lod),
        }
        self.attrs = {
            'use_peepholes': self.use_peepholes,
            'is_reverse': self.is_reverse,
            'gate_activation': self.act_gate,
            'cell_activation': self.act_cell,
            'candidate_activation': self.act_cand
        }

    def test_check_output(self):
        for use_seq in {True, False}:
            self.attrs['use_seq'] = use_seq
            self.check_output()


class TestFusionLSTMOpInit(TestFusionLSTMOp):
    def set_conf(self):
        self.has_initial_state = True


class TestFusionLSTMOpReverse(TestFusionLSTMOp):
    def set_conf(self):
        self.is_reverse = True


class TestFusionLSTMOpInitReverse(TestFusionLSTMOp):
    def set_conf(self):
        self.has_initial_state = True
        self.is_reverse = True


class TestFusionLSTMOpMD1(TestFusionLSTMOp):
    def set_conf(self):
        self.M = 36
        self.D = 8


class TestFusionLSTMOpMD2(TestFusionLSTMOp):
    def set_conf(self):
        self.M = 8
        self.D = 8


class TestFusionLSTMOpMD3(TestFusionLSTMOp):
    def set_conf(self):
        self.M = 15
        self.D = 3


class TestFusionLSTMOpBS1(TestFusionLSTMOp):
    def set_conf(self):
        self.lod = [[3]]
        self.D = 16


class TestFusionLSTMOpPeepholes(TestFusionLSTMOp):
    def set_conf(self):
        self.use_peepholes = True


class TestFusionLSTMOpPeepholesInit(TestFusionLSTMOp):
    def set_conf(self):
        self.use_peepholes = True
        self.has_initial_state = True


class TestFusionLSTMOpPeepholesReverse(TestFusionLSTMOp):
    def set_conf(self):
        self.use_peepholes = True
        self.is_reverse = True


class TestFusionLSTMOpPeepholesInitReverse(TestFusionLSTMOp):
    def set_conf(self):
        self.use_peepholes = True
        self.has_initial_state = True
        self.is_reverse = True


class TestFusionLSTMOpPeepholesBS1(TestFusionLSTMOp):
    def set_conf(self):
        self.use_peepholes = True
        self.lod = [[2]]
        self.D = 8


if __name__ == '__main__':
    unittest.main()
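
The folding in setUp() relies on a simple identity: once the FC weight and both biases are baked into the Embeddings table, a row lookup already equals FC(embedding) plus the LSTM gate bias. A minimal numpy sketch verifying that identity (variable names mirror the test above; this is illustrative, not the op's API):

import numpy as np

M, D, dict_size = 8, 16, 18
wx = np.random.normal(size=(M, 4 * D)).astype('float32')   # FC weight
bx = np.random.normal(size=(1, 4 * D)).astype('float32')   # FC bias
w_b = np.random.normal(size=(1, 4 * D)).astype('float32')  # LSTM gate bias
embeddings = np.random.random((dict_size, M)).astype('float32')

# fold the FC result and both biases into the table, as setUp() does
fc_embeddings = np.dot(embeddings, wx) + w_b + bx

# looking up a row of the folded table == FC(embedding) + gate bias
i = 5
np.testing.assert_allclose(
    fc_embeddings[i], np.dot(embeddings[i], wx) + bx[0] + w_b[0],
    rtol=1e-4, atol=1e-5)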
