Skip to content

Commit e09ac3d

Browse files
committed
replace lod name with recur_seq_lens
1 parent 67ab324 commit e09ac3d

14 files changed

+163
-97
lines changed

doc/fluid/design/concepts/lod_tensor.md

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -173,6 +173,7 @@ are transformed into offsets of elements/words as follows:
173173

174174
## Slicing of LoD Tensors
175175

176+
176177
When we use the above 2-level LoD Tensor as the input to a nested-RNN, we need to retrieve certain sequences. Here we define the sequence identified by branch <i,j,...> as the **<i,j,...>-slice**.
177178

178179
For example, the <2>-slice of above example is
@@ -189,3 +190,22 @@ and the <2,0>-slice of above slice is
189190
10 12
190191
||
191192
```
193+
194+
## Length Representation vs Offset Representation
195+
196+
The offset representation is an implementation-oriented decision and it makes understanding the idea behind LoDTensor difficult.
197+
Hence, we encapsulate this implementation detail in C++ and expose the original length representation in our Python API.
198+
Specifically, we call this length representation `recursive_sequence_lengths` and users can use the following code to set or get the `recursive_sequence_lengths` of a LoDTensor in Python:
199+
```Python
200+
# The length-based representation of LoD is called recursive_sequence_lengths
201+
recursive_seq_lens = [[3, 1, 2], [2, 2, 1, 3, 1, 2]]
202+
# Create a LoDTensor that has the above recursive_sequence_lengths info.
203+
# This recursive_sequence_lengths will be converted to an offset representation of LoD in the C++ implementation under the hood.
204+
tensor = fluid.LoDTensor(recursive_seq_lens)
205+
206+
# Set/Change the recursive_sequence_lengths info of LoDTensor
207+
tensor.set_recursive_sequence_lengths([[3, 1, 2]])
208+
# Get the recursive_sequence_lengths info of a LoDTensor (the offset-based LoD representation stored in C++ will be converted
209+
# back to length-based recursive_sequence_lengths), new_recursive_seq_lens = [[3, 1, 2]]
210+
new_recursive_seq_lens = tensor.recursive_sequence_lengths()
211+
```

python/paddle/fluid/tests/book/high-level-api/label_semantic_roles/test_label_semantic_roles_newapi.py

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -206,35 +206,35 @@ def infer(use_cuda, inference_program, params_dirname):
206206
inferencer = fluid.Inferencer(
207207
inference_program, param_path=params_dirname, place=place)
208208

209-
# Setup inputs by creating LoDTensors to represent sequences of words.
210-
# Here each word is the basic element of these LoDTensors and the shape of
209+
# Setup input by creating LoDTensor to represent sequence of words.
210+
# Here each word is the basic element of the LoDTensor and the shape of
211211
# each word (base_shape) should be [1] since it is simply an index to
212212
# look up for the corresponding word vector.
213-
# Suppose the length_based level of detail (lod) info is set to [[3, 4, 2]],
214-
# which has only one lod level. Then the created LoDTensors will have only
213+
# Suppose the recursive_sequence_lengths info is set to [[3, 4, 2]],
214+
# which has only one level of detail. Then the created LoDTensor will have only
215215
# one higher level structure (sequence of words, or sentence) than the basic
216216
# element (word). Hence the LoDTensor will hold data for three sentences of
217217
# length 3, 4 and 2, respectively.
218-
# Note that lod info should be a list of lists.
219-
lod = [[3, 4, 2]]
218+
# Note that recursive_sequence_lengths should be a list of lists.
219+
recursive_seq_lens = [[3, 4, 2]]
220220
base_shape = [1]
221221
# The range of random integers is [low, high]
222222
word = fluid.create_random_int_lodtensor(
223-
lod, base_shape, place, low=0, high=WORD_DICT_LEN - 1)
223+
recursive_seq_lens, base_shape, place, low=0, high=WORD_DICT_LEN - 1)
224224
ctx_n2 = fluid.create_random_int_lodtensor(
225-
lod, base_shape, place, low=0, high=WORD_DICT_LEN - 1)
225+
recursive_seq_lens, base_shape, place, low=0, high=WORD_DICT_LEN - 1)
226226
ctx_n1 = fluid.create_random_int_lodtensor(
227-
lod, base_shape, place, low=0, high=WORD_DICT_LEN - 1)
227+
recursive_seq_lens, base_shape, place, low=0, high=WORD_DICT_LEN - 1)
228228
ctx_0 = fluid.create_random_int_lodtensor(
229-
lod, base_shape, place, low=0, high=WORD_DICT_LEN - 1)
229+
recursive_seq_lens, base_shape, place, low=0, high=WORD_DICT_LEN - 1)
230230
ctx_p1 = fluid.create_random_int_lodtensor(
231-
lod, base_shape, place, low=0, high=WORD_DICT_LEN - 1)
231+
recursive_seq_lens, base_shape, place, low=0, high=WORD_DICT_LEN - 1)
232232
ctx_p2 = fluid.create_random_int_lodtensor(
233-
lod, base_shape, place, low=0, high=WORD_DICT_LEN - 1)
233+
recursive_seq_lens, base_shape, place, low=0, high=WORD_DICT_LEN - 1)
234234
pred = fluid.create_random_int_lodtensor(
235-
lod, base_shape, place, low=0, high=PRED_DICT_LEN - 1)
235+
recursive_seq_lens, base_shape, place, low=0, high=PRED_DICT_LEN - 1)
236236
mark = fluid.create_random_int_lodtensor(
237-
lod, base_shape, place, low=0, high=MARK_DICT_LEN - 1)
237+
recursive_seq_lens, base_shape, place, low=0, high=MARK_DICT_LEN - 1)
238238

239239
results = inferencer.infer(
240240
{

python/paddle/fluid/tests/book/high-level-api/machine_translation/test_machine_translation.py

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -215,11 +215,13 @@ def decode_main(use_cuda, is_sparse):
215215
[1. for _ in range(batch_size)], dtype='float32')
216216
init_ids_data = init_ids_data.reshape((batch_size, 1))
217217
init_scores_data = init_scores_data.reshape((batch_size, 1))
218-
init_lod = [1] * batch_size
219-
init_lod = [init_lod, init_lod]
218+
init_recursive_seq_lens = [1] * batch_size
219+
init_recursive_seq_lens = [init_recursive_seq_lens, init_recursive_seq_lens]
220220

221-
init_ids = fluid.create_lod_tensor(init_ids_data, init_lod, place)
222-
init_scores = fluid.create_lod_tensor(init_scores_data, init_lod, place)
221+
init_ids = fluid.create_lod_tensor(init_ids_data,
222+
init_recursive_seq_lens, place)
223+
init_scores = fluid.create_lod_tensor(init_scores_data,
224+
init_recursive_seq_lens, place)
223225

224226
train_data = paddle.batch(
225227
paddle.reader.shuffle(
@@ -243,7 +245,7 @@ def decode_main(use_cuda, is_sparse):
243245
feed=feed_dict,
244246
fetch_list=[translation_ids, translation_scores],
245247
return_numpy=False)
246-
print result_ids.lod()
248+
print result_ids.recursive_sequence_lengths()
247249
break
248250

249251

python/paddle/fluid/tests/book/high-level-api/recommender_system/test_recommender_system_newapi.py

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -209,13 +209,15 @@ def infer(use_cuda, inference_program, params_dirname):
209209
inference_program, param_path=params_dirname, place=place)
210210

211211
# Use the first data from paddle.dataset.movielens.test() as input.
212-
# Use create_lod_tensor(data, lod, place) API to generate LoD Tensor,
213-
# where `data` is a list of sequences of index numbers, `lod` is
214-
# the level of detail (lod) info associated with `data`.
212+
# Use create_lod_tensor(data, recursive_sequence_lengths, place) API
213+
# to generate LoD Tensor where `data` is a list of sequences of index
214+
# numbers, `recursive_sequence_lengths` is the length-based level of detail
215+
# (lod) info associated with `data`.
215216
# For example, data = [[10, 2, 3], [2, 3]] means that it contains
216217
# two sequences of indexes, of length 3 and 2, respectively.
217-
# Correspondingly, lod = [[3, 2]] contains one level of detail info,
218-
# indicating that `data` consists of two sequences of length 3 and 2.
218+
# Correspondingly, recursive_sequence_lengths = [[3, 2]] contains one
219+
# level of detail info, indicating that `data` consists of two sequences
220+
# of length 3 and 2, respectively.
219221
user_id = fluid.create_lod_tensor([[1]], [[1]], place)
220222
gender_id = fluid.create_lod_tensor([[1]], [[1]], place)
221223
age_id = fluid.create_lod_tensor([[0]], [[1]], place)

python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_conv.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -128,17 +128,17 @@ def infer(use_cuda, inference_program, params_dirname=None):
128128
# Here each word is the basic element of the LoDTensor and the shape of
129129
# each word (base_shape) should be [1] since it is simply an index to
130130
# look up for the corresponding word vector.
131-
# Suppose the length_based level of detail (lod) info is set to [[3, 4, 2]],
132-
# which has only one lod level. Then the created LoDTensor will have only
131+
# Suppose the recursive_sequence_lengths info is set to [[3, 4, 2]],
132+
# which has only one level of detail. Then the created LoDTensor will have only
133133
# one higher level structure (sequence of words, or sentence) than the basic
134134
# element (word). Hence the LoDTensor will hold data for three sentences of
135135
# length 3, 4 and 2, respectively.
136-
# Note that lod info should be a list of lists.
137-
lod = [[3, 4, 2]]
136+
# Note that recursive_sequence_lengths should be a list of lists.
137+
recursive_seq_lens = [[3, 4, 2]]
138138
base_shape = [1]
139139
# The range of random integers is [low, high]
140140
tensor_words = fluid.create_random_int_lodtensor(
141-
lod, base_shape, place, low=0, high=len(word_dict) - 1)
141+
recursive_seq_lens, base_shape, place, low=0, high=len(word_dict) - 1)
142142
results = inferencer.infer({'words': tensor_words})
143143
print("infer results: ", results)
144144

python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_dynamic_rnn.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -143,17 +143,17 @@ def infer(use_cuda, inference_program, params_dirname=None):
143143
# Here each word is the basic element of the LoDTensor and the shape of
144144
# each word (base_shape) should be [1] since it is simply an index to
145145
# look up for the corresponding word vector.
146-
# Suppose the length_based level of detail (lod) info is set to [[3, 4, 2]],
147-
# which has only one lod level. Then the created LoDTensor will have only
146+
# Suppose the recursive_sequence_lengths info is set to [[3, 4, 2]],
147+
# which has only one level of detail. Then the created LoDTensor will have only
148148
# one higher level structure (sequence of words, or sentence) than the basic
149149
# element (word). Hence the LoDTensor will hold data for three sentences of
150150
# length 3, 4 and 2, respectively.
151-
# Note that lod info should be a list of lists.
152-
lod = [[3, 4, 2]]
151+
# Note that recursive_sequence_lengths should be a list of lists.
152+
recursive_seq_lens = [[3, 4, 2]]
153153
base_shape = [1]
154154
# The range of random integers is [low, high]
155155
tensor_words = fluid.create_random_int_lodtensor(
156-
lod, base_shape, place, low=0, high=len(word_dict) - 1)
156+
recursive_seq_lens, base_shape, place, low=0, high=len(word_dict) - 1)
157157
results = inferencer.infer({'words': tensor_words})
158158
print("infer results: ", results)
159159

python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_stacked_lstm.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -138,17 +138,17 @@ def infer(use_cuda, inference_program, params_dirname=None):
138138
# Here each word is the basic element of the LoDTensor and the shape of
139139
# each word (base_shape) should be [1] since it is simply an index to
140140
# look up for the corresponding word vector.
141-
# Suppose the length_based level of detail (lod) info is set to [[3, 4, 2]],
142-
# which has only one lod level. Then the created LoDTensor will have only
141+
# Suppose the recursive_sequence_lengths info is set to [[3, 4, 2]],
142+
# which has only one level of detail. Then the created LoDTensor will have only
143143
# one higher level structure (sequence of words, or sentence) than the basic
144144
# element (word). Hence the LoDTensor will hold data for three sentences of
145145
# length 3, 4 and 2, respectively.
146-
# Note that lod info should be a list of lists.
147-
lod = [[3, 4, 2]]
146+
# Note that recursive_sequence_lengths should be a list of lists.
147+
recursive_seq_lens = [[3, 4, 2]]
148148
base_shape = [1]
149149
# The range of random integers is [low, high]
150150
tensor_words = fluid.create_random_int_lodtensor(
151-
lod, base_shape, place, low=0, high=len(word_dict) - 1)
151+
recursive_seq_lens, base_shape, place, low=0, high=len(word_dict) - 1)
152152
results = inferencer.infer({'words': tensor_words})
153153
print("infer results: ", results)
154154

python/paddle/fluid/tests/book/high-level-api/word2vec/test_word2vec_new_api.py

Lines changed: 10 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -124,21 +124,22 @@ def infer(use_cuda, inference_program, params_dirname=None):
124124

125125
# Setup inputs by creating 4 LoDTensors representing 4 words. Here each word
126126
# is simply an index to look up for the corresponding word vector and hence
127-
# the shape of word (base_shape) should be [1]. The length-based level of
128-
# detail (lod) info of each LoDtensor should be [[1]] meaning there is only
129-
# one lod_level and there is only one sequence of one word on this level.
130-
# Note that lod info should be a list of lists.
131-
lod = [[1]]
127+
# the shape of word (base_shape) should be [1]. The recursive_sequence_lengths,
128+
# which is length-based level of detail (lod) of each LoDTensor, should be [[1]]
129+
# meaning there is only one level of detail and there is only one sequence of
130+
# one word on this level.
131+
# Note that recursive_sequence_lengths should be a list of lists.
132+
recursive_seq_lens = [[1]]
132133
base_shape = [1]
133134
# The range of random integers is [low, high]
134135
first_word = fluid.create_random_int_lodtensor(
135-
lod, base_shape, place, low=0, high=dict_size - 1)
136+
recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1)
136137
second_word = fluid.create_random_int_lodtensor(
137-
lod, base_shape, place, low=0, high=dict_size - 1)
138+
recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1)
138139
third_word = fluid.create_random_int_lodtensor(
139-
lod, base_shape, place, low=0, high=dict_size - 1)
140+
recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1)
140141
fourth_word = fluid.create_random_int_lodtensor(
141-
lod, base_shape, place, low=0, high=dict_size - 1)
142+
recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1)
142143

143144
result = inferencer.infer(
144145
{

python/paddle/fluid/tests/book/notest_understand_sentiment.py

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -238,17 +238,21 @@ def infer(word_dict, use_cuda, save_dirname=None):
238238
# Here each word is the basic element of the LoDTensor and the shape of
239239
# each word (base_shape) should be [1] since it is simply an index to
240240
# look up for the corresponding word vector.
241-
# Suppose the length_based level of detail (lod) info is set to [[3, 4, 2]],
242-
# which has only one lod level. Then the created LoDTensor will have only
241+
# Suppose the recursive_sequence_lengths info is set to [[3, 4, 2]],
242+
# which has only one level of detail. Then the created LoDTensor will have only
243243
# one higher level structure (sequence of words, or sentence) than the basic
244244
# element (word). Hence the LoDTensor will hold data for three sentences of
245245
# length 3, 4 and 2, respectively.
246-
# Note that lod info should be a list of lists.
247-
lod = [[3, 4, 2]]
246+
# Note that recursive_sequence_lengths should be a list of lists.
247+
recursive_seq_lens = [[3, 4, 2]]
248248
base_shape = [1]
249249
# The range of random integers is [low, high]
250250
tensor_words = fluid.create_random_int_lodtensor(
251-
lod, base_shape, place, low=0, high=word_dict_len - 1)
251+
recursive_seq_lens,
252+
base_shape,
253+
place,
254+
low=0,
255+
high=word_dict_len - 1)
252256

253257
# Construct feed as a dictionary of {feed_target_name: feed_target_data}
254258
# and results will contain a list of data corresponding to fetch_targets.
@@ -257,7 +261,7 @@ def infer(word_dict, use_cuda, save_dirname=None):
257261
feed={feed_target_names[0]: tensor_words},
258262
fetch_list=fetch_targets,
259263
return_numpy=False)
260-
print(results[0].lod())
264+
print(results[0].recursive_sequence_lengths())
261265
np_data = np.array(results[0])
262266
print("Inference Shape: ", np_data.shape)
263267
print("Inference results: ", np_data)

python/paddle/fluid/tests/book/test_label_semantic_roles.py

Lines changed: 47 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -247,35 +247,67 @@ def infer(use_cuda, save_dirname=None):
247247
[inference_program, feed_target_names,
248248
fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)
249249

250-
# Setup inputs by creating LoDTensors to represent sequences of words.
251-
# Here each word is the basic element of these LoDTensors and the shape of
250+
# Setup input by creating LoDTensor to represent sequence of words.
251+
# Here each word is the basic element of the LoDTensor and the shape of
252252
# each word (base_shape) should be [1] since it is simply an index to
253253
# look up for the corresponding word vector.
254-
# Suppose the length_based level of detail (lod) info is set to [[3, 4, 2]],
255-
# which has only one lod level. Then the created LoDTensors will have only
254+
# Suppose the recursive_sequence_lengths info is set to [[3, 4, 2]],
255+
# which has only one level of detail. Then the created LoDTensor will have only
256256
# one higher level structure (sequence of words, or sentence) than the basic
257257
# element (word). Hence the LoDTensor will hold data for three sentences of
258258
# length 3, 4 and 2, respectively.
259-
# Note that lod info should be a list of lists.
260-
lod = [[3, 4, 2]]
259+
# Note that recursive_sequence_lengths should be a list of lists.
260+
recursive_seq_lens = [[3, 4, 2]]
261261
base_shape = [1]
262262
# The range of random integers is [low, high]
263263
word = fluid.create_random_int_lodtensor(
264-
lod, base_shape, place, low=0, high=word_dict_len - 1)
264+
recursive_seq_lens,
265+
base_shape,
266+
place,
267+
low=0,
268+
high=word_dict_len - 1)
265269
pred = fluid.create_random_int_lodtensor(
266-
lod, base_shape, place, low=0, high=pred_dict_len - 1)
270+
recursive_seq_lens,
271+
base_shape,
272+
place,
273+
low=0,
274+
high=pred_dict_len - 1)
267275
ctx_n2 = fluid.create_random_int_lodtensor(
268-
lod, base_shape, place, low=0, high=word_dict_len - 1)
276+
recursive_seq_lens,
277+
base_shape,
278+
place,
279+
low=0,
280+
high=word_dict_len - 1)
269281
ctx_n1 = fluid.create_random_int_lodtensor(
270-
lod, base_shape, place, low=0, high=word_dict_len - 1)
282+
recursive_seq_lens,
283+
base_shape,
284+
place,
285+
low=0,
286+
high=word_dict_len - 1)
271287
ctx_0 = fluid.create_random_int_lodtensor(
272-
lod, base_shape, place, low=0, high=word_dict_len - 1)
288+
recursive_seq_lens,
289+
base_shape,
290+
place,
291+
low=0,
292+
high=word_dict_len - 1)
273293
ctx_p1 = fluid.create_random_int_lodtensor(
274-
lod, base_shape, place, low=0, high=word_dict_len - 1)
294+
recursive_seq_lens,
295+
base_shape,
296+
place,
297+
low=0,
298+
high=word_dict_len - 1)
275299
ctx_p2 = fluid.create_random_int_lodtensor(
276-
lod, base_shape, place, low=0, high=word_dict_len - 1)
300+
recursive_seq_lens,
301+
base_shape,
302+
place,
303+
low=0,
304+
high=word_dict_len - 1)
277305
mark = fluid.create_random_int_lodtensor(
278-
lod, base_shape, place, low=0, high=mark_dict_len - 1)
306+
recursive_seq_lens,
307+
base_shape,
308+
place,
309+
low=0,
310+
high=mark_dict_len - 1)
279311

280312
# Construct feed as a dictionary of {feed_target_name: feed_target_data}
281313
# and results will contain a list of data corresponding to fetch_targets.
@@ -301,7 +333,7 @@ def infer(use_cuda, save_dirname=None):
301333
},
302334
fetch_list=fetch_targets,
303335
return_numpy=False)
304-
print(results[0].lod())
336+
print(results[0].recursive_sequence_lengths())
305337
np_data = np.array(results[0])
306338
print("Inference Shape: ", np_data.shape)
307339

0 commit comments

Comments
 (0)