Skip to content

Commit 83fb834

Browse files
authored
Modify RNN encoder decoder example using new LoDTensor API (#11021)
* initial commit * modify rnn_encoder_decoder example
1 parent 21e794c commit 83fb834

File tree

1 file changed

+25
-36
lines changed

1 file changed

+25
-36
lines changed

python/paddle/fluid/tests/book/notest_rnn_encoder_decoder.py renamed to python/paddle/fluid/tests/book/test_rnn_encoder_decoder.py

Lines changed: 25 additions & 36 deletions
Original file line number | Diff line number | Diff line change
@@ -152,29 +152,6 @@ def seq_to_seq_net():
152152
return avg_cost, prediction
153153

154154

155-
def to_lodtensor(data, place):
156-
seq_lens = [len(seq) for seq in data]
157-
cur_len = 0
158-
lod = [cur_len]
159-
for l in seq_lens:
160-
cur_len += l
161-
lod.append(cur_len)
162-
flattened_data = np.concatenate(data, axis=0).astype("int64")
163-
flattened_data = flattened_data.reshape([len(flattened_data), 1])
164-
res = core.LoDTensor()
165-
res.set(flattened_data, place)
166-
res.set_lod([lod])
167-
return res
168-
169-
170-
def create_random_lodtensor(lod, place, low, high):
    """Create a LoDTensor of random int64 ids with the given LoD.

    Args:
        lod: offset-based LoD list, e.g. ``[0, 4, 10]``; ``lod[-1]`` is the
            total number of elements to generate.
        place: device Place the tensor should be set on — presumably a
            CPUPlace/CUDAPlace; verify against the caller.
        low, high: inclusive lower/upper bounds of the random integers.

    Returns:
        A ``fluid.LoDTensor`` of shape ``[lod[-1], 1]`` with LoD ``[lod]``.
    """
    # np.random.random_integers is deprecated (removed in NumPy >= 1.25).
    # randint's upper bound is exclusive, so high + 1 keeps the original
    # inclusive [low, high] semantics.
    data = np.random.randint(low, high + 1, [lod[-1], 1]).astype("int64")
    res = fluid.LoDTensor()
    res.set(data, place)
    res.set_lod([lod])
    return res
176-
177-
178155
def train(use_cuda, save_dirname=None):
179156
[avg_cost, prediction] = seq_to_seq_net()
180157

@@ -188,22 +165,20 @@ def train(use_cuda, save_dirname=None):
188165

189166
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
190167
exe = Executor(place)
191-
192168
exe.run(framework.default_startup_program())
193169

170+
feed_order = ['source_sequence', 'target_sequence', 'label_sequence']
171+
feed_list = [
172+
framework.default_main_program().global_block().var(var_name)
173+
for var_name in feed_order
174+
]
175+
feeder = fluid.DataFeeder(feed_list, place)
176+
194177
batch_id = 0
195178
for pass_id in xrange(2):
196179
for data in train_data():
197-
word_data = to_lodtensor(map(lambda x: x[0], data), place)
198-
trg_word = to_lodtensor(map(lambda x: x[1], data), place)
199-
trg_word_next = to_lodtensor(map(lambda x: x[2], data), place)
200-
201180
outs = exe.run(framework.default_main_program(),
202-
feed={
203-
'source_sequence': word_data,
204-
'target_sequence': trg_word,
205-
'label_sequence': trg_word_next
206-
},
181+
feed=feeder.feed(data),
207182
fetch_list=[avg_cost])
208183

209184
avg_cost_val = np.array(outs[0])
@@ -237,9 +212,23 @@ def infer(use_cuda, save_dirname=None):
237212
[inference_program, feed_target_names,
238213
fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)
239214

240-
lod = [0, 4, 10]
241-
word_data = create_random_lodtensor(lod, place, low=0, high=1)
242-
trg_word = create_random_lodtensor(lod, place, low=0, high=1)
215+
# Setup input by creating LoDTensor to represent sequence of words.
216+
# Here each word is the basic element of the LoDTensor and the shape of
217+
# each word (base_shape) should be [1] since it is simply an index to
218+
# look up for the corresponding word vector.
219+
# Suppose the length_based level of detail (lod) info is set to [[4, 6]],
220+
# which has only one lod level. Then the created LoDTensor will have only
221+
# one higher level structure (sequence of words, or sentence) than the basic
222+
# element (word). Hence the LoDTensor will hold data for two sentences of
223+
# length 4 and 6, respectively.
224+
# Note that lod info should be a list of lists.
225+
lod = [[4, 6]]
226+
base_shape = [1]
227+
# The range of random integers is [low, high]
228+
word_data = fluid.create_random_int_lodtensor(
229+
lod, base_shape, place, low=0, high=1)
230+
trg_word = fluid.create_random_int_lodtensor(
231+
lod, base_shape, place, low=0, high=1)
243232

244233
# Construct feed as a dictionary of {feed_target_name: feed_target_data}
245234
# and results will contain a list of data corresponding to fetch_targets.

0 commit comments

Comments
 (0)