1
1
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
2
- # # Licensed under the Apache License, Version 2.0 (the "License");
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
4
# you may not use this file except in compliance with the License.
4
5
# You may obtain a copy of the License at
5
6
#
21
22
22
23
23
24
def create_random_lodtensor(lod, place, low, high):
    """Build a LoDTensor of shape [lod[-1], 1] filled with random int64
    values drawn uniformly from the inclusive range [low, high].

    Args:
        lod: level-of-detail offsets; lod[-1] gives the number of rows.
        place: fluid place (CPUPlace/CUDAPlace) on which the data is set.
        low: inclusive lower bound of the random integers.
        high: inclusive upper bound of the random integers.

    Returns:
        A fluid.LoDTensor holding the random data, with lod set to [lod].
    """
    # np.random.random_integers is deprecated (removed in NumPy 1.24);
    # randint with an exclusive upper bound of high + 1 produces the same
    # inclusive [low, high] range.
    data = np.random.randint(low, high + 1, [lod[-1], 1]).astype("int64")
    res = fluid.LoDTensor()
    res.set(data, place)
    res.set_lod([lod])
    return res
29
31
30
32
31
- def infer (use_cuda , save_dirname = None ):
32
- if save_dirname is None :
33
- return
34
-
35
- place = fluid .CUDAPlace (0 ) if use_cuda else fluid .CPUPlace ()
36
- exe = fluid .Executor (place )
37
-
38
- # Use fluid.io.load_inference_model to obtain the inference program desc,
39
- # the feed_target_names (the names of variables that will be feeded
40
- # data using feed operators), and the fetch_targets (variables that
41
- # we want to obtain data from using fetch operators).
42
- [inference_program , feed_target_names ,
43
- fetch_targets ] = fluid .io .load_inference_model (save_dirname , exe )
44
-
45
- word_dict = paddle .dataset .imikolov .build_dict ()
46
- dict_size = len (word_dict ) - 1
47
-
48
- # Setup input, by creating 4 words, and setting up lod required for
49
- # lookup_table_op
50
- lod = [0 , 1 ]
51
- first_word = create_random_lodtensor (lod , place , low = 0 , high = dict_size )
52
- second_word = create_random_lodtensor (lod , place , low = 0 , high = dict_size )
53
- third_word = create_random_lodtensor (lod , place , low = 0 , high = dict_size )
54
- fourth_word = create_random_lodtensor (lod , place , low = 0 , high = dict_size )
55
-
56
- assert feed_target_names [0 ] == 'firstw'
57
- assert feed_target_names [1 ] == 'secondw'
58
- assert feed_target_names [2 ] == 'thirdw'
59
- assert feed_target_names [3 ] == 'forthw'
60
-
61
- # Construct feed as a dictionary of {feed_target_name: feed_target_data}
62
- # and results will contain a list of data corresponding to fetch_targets.
63
- results = exe .run (inference_program ,
64
- feed = {
65
- feed_target_names [0 ]: first_word ,
66
- feed_target_names [1 ]: second_word ,
67
- feed_target_names [2 ]: third_word ,
68
- feed_target_names [3 ]: fourth_word
69
- },
70
- fetch_list = fetch_targets ,
71
- return_numpy = False )
72
- print (results [0 ].lod ())
73
- np_data = np .array (results [0 ])
74
- print ("Inference Shape: " , np_data .shape )
75
- print ("Inference results: " , np_data )
76
-
77
-
78
- def train (use_cuda , is_sparse , parallel , save_dirname ):
33
+ def train (use_cuda , is_sparse , is_parallel , save_dirname ):
79
34
PASS_NUM = 100
80
35
EMBED_SIZE = 32
81
36
HIDDEN_SIZE = 256
@@ -130,7 +85,7 @@ def __network__(words):
130
85
forth_word = fluid .layers .data (name = 'forthw' , shape = [1 ], dtype = 'int64' )
131
86
next_word = fluid .layers .data (name = 'nextw' , shape = [1 ], dtype = 'int64' )
132
87
133
- if not parallel :
88
+ if not is_parallel :
134
89
avg_cost , predict_word = __network__ (
135
90
[first_word , second_word , third_word , forth_word , next_word ])
136
91
else :
@@ -176,11 +131,61 @@ def __network__(words):
176
131
raise AssertionError ("Cost is too large {0:2.2}" .format (avg_cost_np [0 ]))
177
132
178
133
179
- def main (use_cuda , is_sparse , parallel ):
134
def infer(use_cuda, save_dirname=None):
    """Load the saved word2vec inference model and run one random query."""
    # Nothing was saved (e.g. the parallel training path); nothing to infer.
    if save_dirname is None:
        return

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)

    # Recover the inference program desc, the feed variable names (data is
    # fed through feed operators) and the fetch targets (variables whose
    # values are retrieved through fetch operators) from the saved model.
    [inference_program, feed_target_names,
     fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)

    word_dict = paddle.dataset.imikolov.build_dict()
    dict_size = len(word_dict)

    # Four random context words; each input is a LoDTensor with lod [0, 1].
    lod = [0, 1]
    words = [
        create_random_lodtensor(lod, place, low=0, high=dict_size - 1)
        for _ in range(4)
    ]

    # The saved model must expose the feed targets in this exact order.
    assert feed_target_names[0] == 'firstw'
    assert feed_target_names[1] == 'secondw'
    assert feed_target_names[2] == 'thirdw'
    assert feed_target_names[3] == 'forthw'

    # Feed maps {feed_target_name: feed_target_data}; results holds one
    # entry per fetch target, kept as LoDTensors (return_numpy=False).
    feed = {feed_target_names[i]: word for i, word in enumerate(words)}
    results = exe.run(inference_program,
                      feed=feed,
                      fetch_list=fetch_targets,
                      return_numpy=False)
    print(results[0].lod())
    np_data = np.array(results[0])
    print("Inference Shape: ", np_data.shape)
177
+
178
+
179
def main(use_cuda, is_sparse, is_parallel):
    """Train a word2vec model, then run inference on the saved artifact."""
    # Silently skip CUDA runs when this build has no CUDA support.
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        return

    # The parallel training path does not save a model; infer() then
    # returns immediately because the dirname is None.
    save_dirname = None if is_parallel else "word2vec.inference.model"

    train(use_cuda, is_sparse, is_parallel, save_dirname)
    infer(use_cuda, save_dirname)
185
190
186
191
@@ -193,21 +198,23 @@ class W2VTest(unittest.TestCase):
193
198
pass
194
199
195
200
196
- def inject_test_method (use_cuda , is_sparse , parallel ):
201
+ def inject_test_method (use_cuda , is_sparse , is_parallel ):
197
202
fn_name = "test_{0}_{1}_{2}" .format ("cuda" if use_cuda else "cpu" , "sparse"
198
203
if is_sparse else "dense" , "parallel"
199
- if parallel else "normal" )
204
+ if is_parallel else "normal" )
200
205
201
206
def __impl__ (* args , ** kwargs ):
202
207
prog = fluid .Program ()
203
208
startup_prog = fluid .Program ()
204
209
scope = fluid .core .Scope ()
205
210
with fluid .scope_guard (scope ):
206
211
with fluid .program_guard (prog , startup_prog ):
207
- main (use_cuda = use_cuda , is_sparse = is_sparse , parallel = parallel )
212
+ main (
213
+ use_cuda = use_cuda ,
214
+ is_sparse = is_sparse ,
215
+ is_parallel = is_parallel )
208
216
209
- # run only 2 cases: use_cuda is either True or False
210
- if is_sparse == False and parallel == False :
217
+ if use_cuda and is_sparse :
211
218
fn = __impl__
212
219
else :
213
220
# skip the other test when on CI server
@@ -219,8 +226,8 @@ def __impl__(*args, **kwargs):
219
226
220
227
# Register one test method on W2VTest for every combination of
# (use_cuda, is_sparse, is_parallel).
for use_cuda in (False, True):
    for is_sparse in (False, True):
        for is_parallel in (False, True):
            inject_test_method(use_cuda, is_sparse, is_parallel)
224
231
225
232
# Run the injected test methods when executed as a script.
if __name__ == '__main__':
    unittest.main()
0 commit comments