Commit dbc6102

simplify label_sementic_example
1 parent: d4c2164

File tree

1 file changed: +27 −40 lines


python/paddle/fluid/tests/book/test_label_semantic_roles.py

Lines changed: 27 additions & 40 deletions
@@ -116,29 +116,6 @@ def db_lstm(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark,
     return feature_out
 
 
-def to_lodtensor(data, place):
-    seq_lens = [len(seq) for seq in data]
-    cur_len = 0
-    lod = [cur_len]
-    for l in seq_lens:
-        cur_len += l
-        lod.append(cur_len)
-    flattened_data = np.concatenate(data, axis=0).astype("int64")
-    flattened_data = flattened_data.reshape([len(flattened_data), 1])
-    res = fluid.LoDTensor()
-    res.set(flattened_data, place)
-    res.set_lod([lod])
-    return res
-
-
-def create_random_lodtensor(lod, place, low, high):
-    data = np.random.random_integers(low, high, [lod[-1], 1]).astype("int64")
-    res = fluid.LoDTensor()
-    res.set(data, place)
-    res.set_lod([lod])
-    return res
-
-
 def train(use_cuda, save_dirname=None, is_local=True):
     # define network topology
     word = fluid.layers.data(
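
The helpers removed above encoded LoD as absolute offsets: lod = [0, 4, 10] describes two sequences covering rows 0-3 and 4-9 of the flattened data, and create_random_lodtensor drew lod[-1] random rows. The numpy-only sketch below illustrates that offset convention; it is not part of the commit, np.random.randint stands in for the deprecated np.random.random_integers, and the dict size 100 is an arbitrary placeholder:

    import numpy as np

    # Offset-based LoD: cumulative boundaries of each sequence in the flat rows.
    offsets = [0, 4, 10]  # two sequences: lengths 4 and 6
    lengths = [b - a for a, b in zip(offsets, offsets[1:])]
    assert lengths == [4, 6]

    # The removed helper drew offsets[-1] rows of random int64 word indices,
    # one index per row (shape [total_words, 1]).
    data = np.random.randint(0, 100, size=(offsets[-1], 1)).astype("int64")
    assert data.shape == (10, 1)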
@@ -271,23 +248,33 @@ def infer(use_cuda, save_dirname=None):
     [inference_program, feed_target_names,
      fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)
 
-    lod = [0, 4, 10]
-    word = create_random_lodtensor(
-        lod, place, low=0, high=word_dict_len - 1)
-    pred = create_random_lodtensor(
-        lod, place, low=0, high=pred_dict_len - 1)
-    ctx_n2 = create_random_lodtensor(
-        lod, place, low=0, high=word_dict_len - 1)
-    ctx_n1 = create_random_lodtensor(
-        lod, place, low=0, high=word_dict_len - 1)
-    ctx_0 = create_random_lodtensor(
-        lod, place, low=0, high=word_dict_len - 1)
-    ctx_p1 = create_random_lodtensor(
-        lod, place, low=0, high=word_dict_len - 1)
-    ctx_p2 = create_random_lodtensor(
-        lod, place, low=0, high=word_dict_len - 1)
-    mark = create_random_lodtensor(
-        lod, place, low=0, high=mark_dict_len - 1)
+    # Setup inputs by creating LoDTensors to represent sequences of words.
+    # Here each word is the basic element of these LoDTensors and the shape of
+    # each word (base_shape) should be [1], since it is simply an index to
+    # look up the corresponding word vector.
+    # Suppose the length-based level of detail (lod) info is set to [[3, 4, 2]],
+    # which has only one lod level. Then the created LoDTensors will have only
+    # one higher-level structure (sequence of words, or sentence) than the basic
+    # element (word). Hence the LoDTensor will hold data for three sentences of
+    # length 3, 4 and 2, respectively.
+    lod = [[3, 4, 2]]
+    base_shape = [1]
+    word = fluid.create_random_lodtensor(
+        lod, base_shape, place, low=0, high=word_dict_len - 1)
+    pred = fluid.create_random_lodtensor(
+        lod, base_shape, place, low=0, high=pred_dict_len - 1)
+    ctx_n2 = fluid.create_random_lodtensor(
+        lod, base_shape, place, low=0, high=word_dict_len - 1)
+    ctx_n1 = fluid.create_random_lodtensor(
+        lod, base_shape, place, low=0, high=word_dict_len - 1)
+    ctx_0 = fluid.create_random_lodtensor(
+        lod, base_shape, place, low=0, high=word_dict_len - 1)
+    ctx_p1 = fluid.create_random_lodtensor(
+        lod, base_shape, place, low=0, high=word_dict_len - 1)
+    ctx_p2 = fluid.create_random_lodtensor(
+        lod, base_shape, place, low=0, high=word_dict_len - 1)
+    mark = fluid.create_random_lodtensor(
+        lod, base_shape, place, low=0, high=mark_dict_len - 1)
 
     # Construct feed as a dictionary of {feed_target_name: feed_target_data}
     # and results will contain a list of data corresponding to fetch_targets.
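
The length-based form introduced here simply lists each sequence's length at each LoD level, which converts straightforwardly back to the old offset form. A minimal numpy-only sketch under that assumption (lengths_to_offsets is a hypothetical helper written for illustration, not part of the fluid API; 100 again stands in for a dict size):

    import numpy as np

    def lengths_to_offsets(lengths):
        # One level of length-based LoD, e.g. [3, 4, 2], becomes the
        # offset-based form [0, 3, 7, 9] by accumulating the lengths.
        offsets = [0]
        for n in lengths:
            offsets.append(offsets[-1] + n)
        return offsets

    lod = [[3, 4, 2]]  # one level: three sentences
    offsets = lengths_to_offsets(lod[0])
    assert offsets == [0, 3, 7, 9]

    # With base_shape [1], the tensor holds offsets[-1] = 9 words in total,
    # one int64 index per word.
    data = np.random.randint(0, 100, size=(offsets[-1], 1)).astype("int64")
    assert data.shape == (9, 1)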
