@@ -116,29 +116,6 @@ def db_lstm(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark,
     return feature_out
 
 
-def to_lodtensor(data, place):
-    seq_lens = [len(seq) for seq in data]
-    cur_len = 0
-    lod = [cur_len]
-    for l in seq_lens:
-        cur_len += l
-        lod.append(cur_len)
-    flattened_data = np.concatenate(data, axis=0).astype("int64")
-    flattened_data = flattened_data.reshape([len(flattened_data), 1])
-    res = fluid.LoDTensor()
-    res.set(flattened_data, place)
-    res.set_lod([lod])
-    return res
-
-
-def create_random_lodtensor(lod, place, low, high):
-    data = np.random.random_integers(low, high, [lod[-1], 1]).astype("int64")
-    res = fluid.LoDTensor()
-    res.set(data, place)
-    res.set_lod([lod])
-    return res
-
-
 def train(use_cuda, save_dirname=None, is_local=True):
     # define network topology
     word = fluid.layers.data(
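The helpers removed above built the lod info by hand as offsets: per-sequence
lengths were accumulated into a cumulative-offset list, and random int64 data
was generated for the flattened batch. A minimal standalone sketch of that
offset computation (plain Python, no fluid needed; the lengths [3, 4, 2] match
the length-based lod used by the new code in the second hunk):

    # Per-sequence lengths, as in the new length-based lod [[3, 4, 2]].
    seq_lens = [3, 4, 2]

    # The removed to_lodtensor stored cumulative offsets instead: [0, 3, 7, 9].
    lod = [0]
    for l in seq_lens:
        lod.append(lod[-1] + l)
    assert lod == [0, 3, 7, 9]

    # Either way, the flattened batch holds 3 + 4 + 2 = 9 words, so the
    # generated int64 index data has shape [9, 1].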
@@ -271,23 +248,35 @@ def infer(use_cuda, save_dirname=None):
     [inference_program, feed_target_names,
      fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)
 
-    lod = [0, 4, 10]
-    word = create_random_lodtensor(
-        lod, place, low=0, high=word_dict_len - 1)
-    pred = create_random_lodtensor(
-        lod, place, low=0, high=pred_dict_len - 1)
-    ctx_n2 = create_random_lodtensor(
-        lod, place, low=0, high=word_dict_len - 1)
-    ctx_n1 = create_random_lodtensor(
-        lod, place, low=0, high=word_dict_len - 1)
-    ctx_0 = create_random_lodtensor(
-        lod, place, low=0, high=word_dict_len - 1)
-    ctx_p1 = create_random_lodtensor(
-        lod, place, low=0, high=word_dict_len - 1)
-    ctx_p2 = create_random_lodtensor(
-        lod, place, low=0, high=word_dict_len - 1)
-    mark = create_random_lodtensor(
-        lod, place, low=0, high=mark_dict_len - 1)
+    # Set up the inputs by creating LoDTensors that represent sequences of
+    # words. Each word is the basic element of these LoDTensors, and the
+    # shape of each word (base_shape) is [1], since a word is simply an
+    # index used to look up the corresponding word vector.
+    # Suppose the length-based level-of-detail (lod) info is set to
+    # [[3, 4, 2]], which has a single lod level. The created LoDTensors then
+    # have exactly one structural level (sequence of words, i.e. sentence)
+    # above the basic element (word), so each LoDTensor holds data for three
+    # sentences of length 3, 4 and 2, respectively.
+    # Note that the lod info must be a list of lists.
+    lod = [[3, 4, 2]]
+    base_shape = [1]
+    # The range of random integers is [low, high].
+    word = fluid.create_random_int_lodtensor(
+        lod, base_shape, place, low=0, high=word_dict_len - 1)
+    pred = fluid.create_random_int_lodtensor(
+        lod, base_shape, place, low=0, high=pred_dict_len - 1)
+    ctx_n2 = fluid.create_random_int_lodtensor(
+        lod, base_shape, place, low=0, high=word_dict_len - 1)
+    ctx_n1 = fluid.create_random_int_lodtensor(
+        lod, base_shape, place, low=0, high=word_dict_len - 1)
+    ctx_0 = fluid.create_random_int_lodtensor(
+        lod, base_shape, place, low=0, high=word_dict_len - 1)
+    ctx_p1 = fluid.create_random_int_lodtensor(
+        lod, base_shape, place, low=0, high=word_dict_len - 1)
+    ctx_p2 = fluid.create_random_int_lodtensor(
+        lod, base_shape, place, low=0, high=word_dict_len - 1)
+    mark = fluid.create_random_int_lodtensor(
+        lod, base_shape, place, low=0, high=mark_dict_len - 1)
 
     # Construct feed as a dictionary of {feed_target_name: feed_target_data}
     # and results will contain a list of data corresponding to fetch_targets.
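A hedged sketch of the feed/fetch step the comment above describes. The
pairing of feed_target_names with the tensors created in this hunk is an
assumption; the actual names and their order depend on how the inference
model was saved:

    # Pair each feed target name with one of the LoDTensors created above
    # (assumed order; verify against the saved model's feed_target_names).
    feed_tensors = [word, pred, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark]
    feed = dict(zip(feed_target_names, feed_tensors))

    # Run the inference program; return_numpy=False keeps the fetched
    # results as LoDTensors rather than converting them to numpy arrays.
    results = exe.run(inference_program,
                      feed=feed,
                      fetch_list=fetch_targets,
                      return_numpy=False)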