Skip to content

Commit c796e01

Browse files
committed
Refine the inference unittests.
1 parent caf9a09 commit c796e01

File tree

5 files changed

+101
-85
lines changed

5 files changed

+101
-85
lines changed

paddle/fluid/framework/lod_tensor.cc

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,8 +31,14 @@ std::ostream &operator<<(std::ostream &os, const LoD &lod) {
3131
os << "{";
3232
for (auto &v : lod) {
3333
os << "{";
34+
bool is_first = true;
3435
for (auto &i : v) {
35-
os << i << ",";
36+
if (is_first) {
37+
os << i;
38+
is_first = false;
39+
} else {
40+
os << ", " << i;
41+
}
3642
}
3743
os << "}";
3844
}

paddle/fluid/inference/tests/book/test_inference_word2vec.cc

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -31,12 +31,12 @@ TEST(inference, word2vec) {
3131

3232
paddle::framework::LoDTensor first_word, second_word, third_word, fourth_word;
3333
paddle::framework::LoD lod{{0, 1}};
34-
int64_t dict_size = 2072; // Hard-coding the size of dictionary
34+
int64_t dict_size = 2073; // The size of dictionary
3535

36-
SetupLoDTensor(first_word, lod, static_cast<int64_t>(0), dict_size);
37-
SetupLoDTensor(second_word, lod, static_cast<int64_t>(0), dict_size);
38-
SetupLoDTensor(third_word, lod, static_cast<int64_t>(0), dict_size);
39-
SetupLoDTensor(fourth_word, lod, static_cast<int64_t>(0), dict_size);
36+
SetupLoDTensor(first_word, lod, static_cast<int64_t>(0), dict_size - 1);
37+
SetupLoDTensor(second_word, lod, static_cast<int64_t>(0), dict_size - 1);
38+
SetupLoDTensor(third_word, lod, static_cast<int64_t>(0), dict_size - 1);
39+
SetupLoDTensor(fourth_word, lod, static_cast<int64_t>(0), dict_size - 1);
4040

4141
std::vector<paddle::framework::LoDTensor*> cpu_feeds;
4242
cpu_feeds.push_back(&first_word);

python/paddle/v2/fluid/tests/book/test_image_classification.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -182,7 +182,9 @@ def infer(use_cuda, save_dirname=None):
182182
fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)
183183

184184
# The input's dimension of conv should be 4-D or 5-D.
185-
tensor_img = numpy.random.rand(1, 3, 32, 32).astype("float32")
185+
# Use normalized image pixels as input data, which should be in the range [0, 1.0].
186+
batch_size = 1
187+
tensor_img = numpy.random.rand(batch_size, 3, 32, 32).astype("float32")
186188

187189
# Construct feed as a dictionary of {feed_target_name: feed_target_data}
188190
# and results will contain a list of data corresponding to fetch_targets.

python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py

Lines changed: 19 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@
2626
word_dict, verb_dict, label_dict = conll05.get_dict()
2727
word_dict_len = len(word_dict)
2828
label_dict_len = len(label_dict)
29-
pred_len = len(verb_dict)
29+
pred_dict_len = len(verb_dict)
3030

3131
mark_dict_len = 2
3232
word_dim = 32
@@ -53,7 +53,7 @@ def db_lstm(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark,
5353
# 8 features
5454
predicate_embedding = fluid.layers.embedding(
5555
input=predicate,
56-
size=[pred_len, word_dim],
56+
size=[pred_dict_len, word_dim],
5757
dtype='float32',
5858
is_sparse=IS_SPARSE,
5959
param_attr='vemb')
@@ -234,6 +234,7 @@ def train(use_cuda, save_dirname=None):
234234
# Set the threshold low to speed up the CI test
235235
if float(pass_precision) > 0.05:
236236
if save_dirname is not None:
237+
# TODO(liuyiqun): Change the target to crf_decode
237238
fluid.io.save_inference_model(save_dirname, [
238239
'word_data', 'verb_data', 'ctx_n2_data',
239240
'ctx_n1_data', 'ctx_0_data', 'ctx_p1_data',
@@ -259,14 +260,14 @@ def infer(use_cuda, save_dirname=None):
259260
fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)
260261

261262
lod = [0, 4, 10]
262-
ts_word = create_random_lodtensor(lod, place, low=0, high=1)
263-
ts_pred = create_random_lodtensor(lod, place, low=0, high=1)
264-
ts_ctx_n2 = create_random_lodtensor(lod, place, low=0, high=1)
265-
ts_ctx_n1 = create_random_lodtensor(lod, place, low=0, high=1)
266-
ts_ctx_0 = create_random_lodtensor(lod, place, low=0, high=1)
267-
ts_ctx_p1 = create_random_lodtensor(lod, place, low=0, high=1)
268-
ts_ctx_p2 = create_random_lodtensor(lod, place, low=0, high=1)
269-
ts_mark = create_random_lodtensor(lod, place, low=0, high=1)
263+
word = create_random_lodtensor(lod, place, low=0, high=word_dict_len - 1)
264+
pred = create_random_lodtensor(lod, place, low=0, high=pred_dict_len - 1)
265+
ctx_n2 = create_random_lodtensor(lod, place, low=0, high=word_dict_len - 1)
266+
ctx_n1 = create_random_lodtensor(lod, place, low=0, high=word_dict_len - 1)
267+
ctx_0 = create_random_lodtensor(lod, place, low=0, high=word_dict_len - 1)
268+
ctx_p1 = create_random_lodtensor(lod, place, low=0, high=word_dict_len - 1)
269+
ctx_p2 = create_random_lodtensor(lod, place, low=0, high=word_dict_len - 1)
270+
mark = create_random_lodtensor(lod, place, low=0, high=mark_dict_len - 1)
270271

271272
# Construct feed as a dictionary of {feed_target_name: feed_target_data}
272273
# and results will contain a list of data corresponding to fetch_targets.
@@ -281,14 +282,14 @@ def infer(use_cuda, save_dirname=None):
281282

282283
results = exe.run(inference_program,
283284
feed={
284-
feed_target_names[0]: ts_word,
285-
feed_target_names[1]: ts_pred,
286-
feed_target_names[2]: ts_ctx_n2,
287-
feed_target_names[3]: ts_ctx_n1,
288-
feed_target_names[4]: ts_ctx_0,
289-
feed_target_names[5]: ts_ctx_p1,
290-
feed_target_names[6]: ts_ctx_p2,
291-
feed_target_names[7]: ts_mark
285+
feed_target_names[0]: word,
286+
feed_target_names[1]: pred,
287+
feed_target_names[2]: ctx_n2,
288+
feed_target_names[3]: ctx_n1,
289+
feed_target_names[4]: ctx_0,
290+
feed_target_names[5]: ctx_p1,
291+
feed_target_names[6]: ctx_p2,
292+
feed_target_names[7]: mark
292293
},
293294
fetch_list=fetch_targets,
294295
return_numpy=False)

python/paddle/v2/fluid/tests/book/test_word2vec.py

Lines changed: 67 additions & 60 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
2-
# # Licensed under the Apache License, Version 2.0 (the "License");
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
34
# you may not use this file except in compliance with the License.
45
# You may obtain a copy of the License at
56
#
@@ -21,61 +22,15 @@
2122

2223

2324
def create_random_lodtensor(lod, place, low, high):
25+
# The range of data elements is [low, high]
2426
data = np.random.random_integers(low, high, [lod[-1], 1]).astype("int64")
2527
res = fluid.LoDTensor()
2628
res.set(data, place)
2729
res.set_lod([lod])
2830
return res
2931

3032

31-
def infer(use_cuda, save_dirname=None):
32-
if save_dirname is None:
33-
return
34-
35-
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
36-
exe = fluid.Executor(place)
37-
38-
# Use fluid.io.load_inference_model to obtain the inference program desc,
39-
# the feed_target_names (the names of variables that will be fed
40-
# data using feed operators), and the fetch_targets (variables that
41-
# we want to obtain data from using fetch operators).
42-
[inference_program, feed_target_names,
43-
fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)
44-
45-
word_dict = paddle.dataset.imikolov.build_dict()
46-
dict_size = len(word_dict) - 1
47-
48-
# Setup input, by creating 4 words, and setting up lod required for
49-
# lookup_table_op
50-
lod = [0, 1]
51-
first_word = create_random_lodtensor(lod, place, low=0, high=dict_size)
52-
second_word = create_random_lodtensor(lod, place, low=0, high=dict_size)
53-
third_word = create_random_lodtensor(lod, place, low=0, high=dict_size)
54-
fourth_word = create_random_lodtensor(lod, place, low=0, high=dict_size)
55-
56-
assert feed_target_names[0] == 'firstw'
57-
assert feed_target_names[1] == 'secondw'
58-
assert feed_target_names[2] == 'thirdw'
59-
assert feed_target_names[3] == 'forthw'
60-
61-
# Construct feed as a dictionary of {feed_target_name: feed_target_data}
62-
# and results will contain a list of data corresponding to fetch_targets.
63-
results = exe.run(inference_program,
64-
feed={
65-
feed_target_names[0]: first_word,
66-
feed_target_names[1]: second_word,
67-
feed_target_names[2]: third_word,
68-
feed_target_names[3]: fourth_word
69-
},
70-
fetch_list=fetch_targets,
71-
return_numpy=False)
72-
print(results[0].lod())
73-
np_data = np.array(results[0])
74-
print("Inference Shape: ", np_data.shape)
75-
print("Inference results: ", np_data)
76-
77-
78-
def train(use_cuda, is_sparse, parallel, save_dirname):
33+
def train(use_cuda, is_sparse, is_parallel, save_dirname):
7934
PASS_NUM = 100
8035
EMBED_SIZE = 32
8136
HIDDEN_SIZE = 256
@@ -130,7 +85,7 @@ def __network__(words):
13085
forth_word = fluid.layers.data(name='forthw', shape=[1], dtype='int64')
13186
next_word = fluid.layers.data(name='nextw', shape=[1], dtype='int64')
13287

133-
if not parallel:
88+
if not is_parallel:
13489
avg_cost, predict_word = __network__(
13590
[first_word, second_word, third_word, forth_word, next_word])
13691
else:
@@ -176,11 +131,61 @@ def __network__(words):
176131
raise AssertionError("Cost is too large {0:2.2}".format(avg_cost_np[0]))
177132

178133

179-
def main(use_cuda, is_sparse, parallel):
134+
def infer(use_cuda, save_dirname=None):
135+
if save_dirname is None:
136+
return
137+
138+
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
139+
exe = fluid.Executor(place)
140+
141+
# Use fluid.io.load_inference_model to obtain the inference program desc,
142+
# the feed_target_names (the names of variables that will be fed
143+
# data using feed operators), and the fetch_targets (variables that
144+
# we want to obtain data from using fetch operators).
145+
[inference_program, feed_target_names,
146+
fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)
147+
148+
word_dict = paddle.dataset.imikolov.build_dict()
149+
dict_size = len(word_dict)
150+
151+
# Setup inputs, by creating 4 words, the lod of which should be [0, 1]
152+
lod = [0, 1]
153+
first_word = create_random_lodtensor(lod, place, low=0, high=dict_size - 1)
154+
second_word = create_random_lodtensor(lod, place, low=0, high=dict_size - 1)
155+
third_word = create_random_lodtensor(lod, place, low=0, high=dict_size - 1)
156+
fourth_word = create_random_lodtensor(lod, place, low=0, high=dict_size - 1)
157+
158+
assert feed_target_names[0] == 'firstw'
159+
assert feed_target_names[1] == 'secondw'
160+
assert feed_target_names[2] == 'thirdw'
161+
assert feed_target_names[3] == 'forthw'
162+
163+
# Construct feed as a dictionary of {feed_target_name: feed_target_data}
164+
# and results will contain a list of data corresponding to fetch_targets.
165+
results = exe.run(inference_program,
166+
feed={
167+
feed_target_names[0]: first_word,
168+
feed_target_names[1]: second_word,
169+
feed_target_names[2]: third_word,
170+
feed_target_names[3]: fourth_word
171+
},
172+
fetch_list=fetch_targets,
173+
return_numpy=False)
174+
print(results[0].lod())
175+
np_data = np.array(results[0])
176+
print("Inference Shape: ", np_data.shape)
177+
178+
179+
def main(use_cuda, is_sparse, is_parallel):
180180
if use_cuda and not fluid.core.is_compiled_with_cuda():
181181
return
182-
save_dirname = "word2vec.inference.model"
183-
train(use_cuda, is_sparse, parallel, save_dirname)
182+
183+
if not is_parallel:
184+
save_dirname = "word2vec.inference.model"
185+
else:
186+
save_dirname = None
187+
188+
train(use_cuda, is_sparse, is_parallel, save_dirname)
184189
infer(use_cuda, save_dirname)
185190

186191

@@ -193,21 +198,23 @@ class W2VTest(unittest.TestCase):
193198
pass
194199

195200

196-
def inject_test_method(use_cuda, is_sparse, parallel):
201+
def inject_test_method(use_cuda, is_sparse, is_parallel):
197202
fn_name = "test_{0}_{1}_{2}".format("cuda" if use_cuda else "cpu", "sparse"
198203
if is_sparse else "dense", "parallel"
199-
if parallel else "normal")
204+
if is_parallel else "normal")
200205

201206
def __impl__(*args, **kwargs):
202207
prog = fluid.Program()
203208
startup_prog = fluid.Program()
204209
scope = fluid.core.Scope()
205210
with fluid.scope_guard(scope):
206211
with fluid.program_guard(prog, startup_prog):
207-
main(use_cuda=use_cuda, is_sparse=is_sparse, parallel=parallel)
212+
main(
213+
use_cuda=use_cuda,
214+
is_sparse=is_sparse,
215+
is_parallel=is_parallel)
208216

209-
# run only 2 cases: use_cuda is either True or False
210-
if is_sparse == False and parallel == False:
217+
if use_cuda and is_sparse:
211218
fn = __impl__
212219
else:
213220
# skip the other test when on CI server
@@ -219,8 +226,8 @@ def __impl__(*args, **kwargs):
219226

220227
for use_cuda in (False, True):
221228
for is_sparse in (False, True):
222-
for parallel in (False, True):
223-
inject_test_method(use_cuda, is_sparse, parallel)
229+
for is_parallel in (False, True):
230+
inject_test_method(use_cuda, is_sparse, is_parallel)
224231

225232
if __name__ == '__main__':
226233
unittest.main()

0 commit comments

Comments
 (0)