Commit 044fb0f

Merge pull request #8511 from reyoung/feature/understand_sentimental_dynrnn
Test Parallel.Do and DynRNN
2 parents d4dabe3 + 7d247ca commit 044fb0f

2 files changed: 59 additions and 1 deletion

python/paddle/v2/fluid/layers/control_flow.py

Lines changed: 2 additions & 1 deletion
@@ -652,7 +652,8 @@ def complete(self):
         parent_block.append_op(
             type='while',
             inputs={
-                'X': [parent_block.var(x_name) for x_name in x_name_list],
+                'X':
+                [parent_block.var_recursive(x_name) for x_name in x_name_list],
                 'Condition': [self.cond_var]
             },
             outputs={'Out': out_vars,
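
The only functional change above swaps parent_block.var for parent_block.var_recursive when collecting the while op's 'X' inputs, so a name can be resolved even if it was created in an ancestor of parent_block rather than in parent_block itself. A minimal toy sketch of that lookup behaviour, assuming var searches only the local block while var_recursive walks the parent chain (ToyBlock and its method bodies are illustrative, not the real Fluid Block API):

    class ToyBlock(object):
        def __init__(self, parent=None):
            # Illustrative stand-in for a Fluid Block: a dict of local
            # variables plus an optional parent scope.
            self.parent = parent
            self.vars = {}

        def var(self, name):
            # Local lookup only: fails for names created in an outer block.
            return self.vars[name]

        def var_recursive(self, name):
            # Fall back to ancestor blocks, so a nested while block can still
            # reference inputs that live further up the scope chain.
            if name in self.vars:
                return self.vars[name]
            if self.parent is not None:
                return self.parent.var_recursive(name)
            raise KeyError(name)

With these toy classes, a variable registered on a grandparent block is found by var_recursive from a nested block but missed by var, which is presumably the situation hit by the new Parallel.Do plus DynRNN test below.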

python/paddle/v2/fluid/tests/book/test_understand_sentiment.py

Lines changed: 57 additions & 0 deletions
@@ -47,6 +47,46 @@ def convolution_net(data, label, input_dim, class_dim=2, emb_dim=32,
     return avg_cost, accuracy, prediction


+def dyn_rnn_lstm(data, label, input_dim, class_dim=2, emb_dim=32,
+                 lstm_size=128):
+    emb = fluid.layers.embedding(
+        input=data, size=[input_dim, emb_dim], is_sparse=True)
+    sentence = fluid.layers.fc(input=emb, size=lstm_size, act='tanh')
+
+    rnn = fluid.layers.DynamicRNN()
+    with rnn.block():
+        word = rnn.step_input(sentence)
+        prev_hidden = rnn.memory(value=0.0, shape=[lstm_size])
+        prev_cell = rnn.memory(value=0.0, shape=[lstm_size])
+
+        def gate_common(ipt, hidden, size):
+            gate0 = fluid.layers.fc(input=ipt, size=size, bias_attr=True)
+            gate1 = fluid.layers.fc(input=hidden, size=size, bias_attr=False)
+            return gate0 + gate1
+
+        forget_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden,
+                                                         lstm_size))
+        input_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden,
+                                                        lstm_size))
+        output_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden,
+                                                         lstm_size))
+        cell_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden,
+                                                       lstm_size))
+
+        cell = forget_gate * prev_cell + input_gate * cell_gate
+        hidden = output_gate * fluid.layers.tanh(x=cell)
+        rnn.update_memory(prev_cell, cell)
+        rnn.update_memory(prev_hidden, hidden)
+        rnn.output(hidden)
+
+    last = fluid.layers.sequence_last_step(rnn())
+    prediction = fluid.layers.fc(input=last, size=class_dim, act="softmax")
+    cost = fluid.layers.cross_entropy(input=prediction, label=label)
+    avg_cost = fluid.layers.mean(x=cost)
+    accuracy = fluid.layers.accuracy(input=prediction, label=label)
+    return avg_cost, accuracy, prediction
+
+
 def stacked_lstm_net(data,
                      label,
                      input_dim,
@@ -270,6 +310,23 @@ def test_stacked_lstm_gpu_parallel(self):
                 use_cuda=True,
                 parallel=True)

+    @unittest.skip(reason='make CI faster')
+    def test_dynrnn_lstm_gpu(self):
+        with self.new_program_scope():
+            main(
+                self.word_dict,
+                net_method=dyn_rnn_lstm,
+                use_cuda=True,
+                parallel=False)
+
+    def test_dynrnn_lstm_gpu_parallel(self):
+        with self.new_program_scope():
+            main(
+                self.word_dict,
+                net_method=dyn_rnn_lstm,
+                use_cuda=True,
+                parallel=True)
+

 if __name__ == '__main__':
     unittest.main()
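
For reference, a minimal NumPy sketch of the per-step computation that dyn_rnn_lstm builds inside rnn.block(); this is a reading of the diff above rather than code from the commit, and the function name lstm_step, the params layout, and the use of NumPy in place of Fluid operators are illustrative assumptions:

    import numpy as np


    def _sigmoid(x):
        return 1.0 / (1.0 + np.exp(-x))


    def lstm_step(x_t, h_prev, c_prev, params):
        """One recurrence step mirroring the body of rnn.block() above.

        params maps each gate name ('f', 'i', 'o', 'g') to a (W, U, b)
        triple: W multiplies the step input, U the previous hidden state,
        and only the W branch carries a bias, matching gate_common's
        bias_attr settings. The candidate gate 'g' goes through a sigmoid,
        as in the test code, rather than the textbook tanh.
        """
        gate = {}
        for name, (W, U, b) in params.items():
            gate[name] = _sigmoid(x_t.dot(W) + b + h_prev.dot(U))
        cell = gate['f'] * c_prev + gate['i'] * gate['g']
        hidden = gate['o'] * np.tanh(cell)
        return hidden, cell

Running lstm_step over the word projections of a sentence and keeping the final hidden state corresponds to sequence_last_step(rnn()) feeding the softmax classifier in the test.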
