Skip to content

Commit 865dfbe

Browse files
committed
Use a new scope for inference in Python unit tests to avoid changing the values of variables used for training.
1 parent f95e05a commit 865dfbe

8 files changed

+258
-229
lines changed

python/paddle/v2/fluid/tests/book/notest_rnn_encoder_decoer.py

Lines changed: 28 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -228,32 +228,34 @@ def infer(use_cuda, save_dirname=None):
228228
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
229229
exe = fluid.Executor(place)
230230

231-
# Use fluid.io.load_inference_model to obtain the inference program desc,
232-
# the feed_target_names (the names of variables that will be feeded
233-
# data using feed operators), and the fetch_targets (variables that
234-
# we want to obtain data from using fetch operators).
235-
[inference_program, feed_target_names,
236-
fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)
237-
238-
lod = [0, 4, 10]
239-
word_data = create_random_lodtensor(lod, place, low=0, high=1)
240-
trg_word = create_random_lodtensor(lod, place, low=0, high=1)
241-
242-
# Construct feed as a dictionary of {feed_target_name: feed_target_data}
243-
# and results will contain a list of data corresponding to fetch_targets.
244-
assert feed_target_names[0] == 'source_sequence'
245-
assert feed_target_names[1] == 'target_sequence'
246-
results = exe.run(inference_program,
247-
feed={
248-
feed_target_names[0]: word_data,
249-
feed_target_names[1]: trg_word,
250-
},
251-
fetch_list=fetch_targets,
252-
return_numpy=False)
253-
print(results[0].lod())
254-
np_data = np.array(results[0])
255-
print("Inference shape: ", np_data.shape)
256-
print("Inference results: ", np_data)
231+
inference_scope = fluid.core.Scope()
232+
with fluid.scope_guard(inference_scope):
233+
# Use fluid.io.load_inference_model to obtain the inference program desc,
234+
# the feed_target_names (the names of variables that will be fed
235+
# data using feed operators), and the fetch_targets (variables that
236+
# we want to obtain data from using fetch operators).
237+
[inference_program, feed_target_names,
238+
fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)
239+
240+
lod = [0, 4, 10]
241+
word_data = create_random_lodtensor(lod, place, low=0, high=1)
242+
trg_word = create_random_lodtensor(lod, place, low=0, high=1)
243+
244+
# Construct feed as a dictionary of {feed_target_name: feed_target_data}
245+
# and results will contain a list of data corresponding to fetch_targets.
246+
assert feed_target_names[0] == 'source_sequence'
247+
assert feed_target_names[1] == 'target_sequence'
248+
results = exe.run(inference_program,
249+
feed={
250+
feed_target_names[0]: word_data,
251+
feed_target_names[1]: trg_word,
252+
},
253+
fetch_list=fetch_targets,
254+
return_numpy=False)
255+
print(results[0].lod())
256+
np_data = np.array(results[0])
257+
print("Inference shape: ", np_data.shape)
258+
print("Inference results: ", np_data)
257259

258260

259261
def main(use_cuda):

python/paddle/v2/fluid/tests/book/test_fit_a_line.py

Lines changed: 20 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -72,23 +72,26 @@ def infer(use_cuda, save_dirname=None):
7272
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
7373
exe = fluid.Executor(place)
7474

75-
# Use fluid.io.load_inference_model to obtain the inference program desc,
76-
# the feed_target_names (the names of variables that will be feeded
77-
# data using feed operators), and the fetch_targets (variables that
78-
# we want to obtain data from using fetch operators).
79-
[inference_program, feed_target_names,
80-
fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)
81-
82-
# The input's dimension should be 2-D and the second dim is 13
83-
# The input data should be >= 0
84-
batch_size = 10
85-
tensor_x = numpy.random.uniform(0, 10, [batch_size, 13]).astype("float32")
86-
assert feed_target_names[0] == 'x'
87-
results = exe.run(inference_program,
88-
feed={feed_target_names[0]: tensor_x},
89-
fetch_list=fetch_targets)
90-
print("infer shape: ", results[0].shape)
91-
print("infer results: ", results[0])
75+
inference_scope = fluid.core.Scope()
76+
with fluid.scope_guard(inference_scope):
77+
# Use fluid.io.load_inference_model to obtain the inference program desc,
78+
# the feed_target_names (the names of variables that will be fed
79+
# data using feed operators), and the fetch_targets (variables that
80+
# we want to obtain data from using fetch operators).
81+
[inference_program, feed_target_names,
82+
fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)
83+
84+
# The input's dimension should be 2-D and the second dim is 13
85+
# The input data should be >= 0
86+
batch_size = 10
87+
tensor_x = numpy.random.uniform(0, 10,
88+
[batch_size, 13]).astype("float32")
89+
assert feed_target_names[0] == 'x'
90+
results = exe.run(inference_program,
91+
feed={feed_target_names[0]: tensor_x},
92+
fetch_list=fetch_targets)
93+
print("infer shape: ", results[0].shape)
94+
print("infer results: ", results[0])
9295

9396

9497
def main(use_cuda):

python/paddle/v2/fluid/tests/book/test_image_classification.py

Lines changed: 20 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -174,24 +174,26 @@ def infer(use_cuda, save_dirname=None):
174174
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
175175
exe = fluid.Executor(place)
176176

177-
# Use fluid.io.load_inference_model to obtain the inference program desc,
178-
# the feed_target_names (the names of variables that will be feeded
179-
# data using feed operators), and the fetch_targets (variables that
180-
# we want to obtain data from using fetch operators).
181-
[inference_program, feed_target_names,
182-
fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)
183-
184-
# The input's dimension of conv should be 4-D or 5-D.
185-
# Use normilized image pixels as input data, which should be in the range [0, 1.0].
186-
batch_size = 1
187-
tensor_img = numpy.random.rand(batch_size, 3, 32, 32).astype("float32")
188-
189-
# Construct feed as a dictionary of {feed_target_name: feed_target_data}
190-
# and results will contain a list of data corresponding to fetch_targets.
191-
results = exe.run(inference_program,
192-
feed={feed_target_names[0]: tensor_img},
193-
fetch_list=fetch_targets)
194-
print("infer results: ", results[0])
177+
inference_scope = fluid.core.Scope()
178+
with fluid.scope_guard(inference_scope):
179+
# Use fluid.io.load_inference_model to obtain the inference program desc,
180+
# the feed_target_names (the names of variables that will be fed
181+
# data using feed operators), and the fetch_targets (variables that
182+
# we want to obtain data from using fetch operators).
183+
[inference_program, feed_target_names,
184+
fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)
185+
186+
# The input's dimension of conv should be 4-D or 5-D.
187+
# Use normalized image pixels as input data, which should be in the range [0, 1.0].
188+
batch_size = 1
189+
tensor_img = numpy.random.rand(batch_size, 3, 32, 32).astype("float32")
190+
191+
# Construct feed as a dictionary of {feed_target_name: feed_target_data}
192+
# and results will contain a list of data corresponding to fetch_targets.
193+
results = exe.run(inference_program,
194+
feed={feed_target_names[0]: tensor_img},
195+
fetch_list=fetch_targets)
196+
print("infer results: ", results[0])
195197

196198

197199
def main(net_type, use_cuda):

python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py

Lines changed: 54 additions & 44 deletions
Original file line numberDiff line numberDiff line change
@@ -252,50 +252,60 @@ def infer(use_cuda, save_dirname=None):
252252
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
253253
exe = fluid.Executor(place)
254254

255-
# Use fluid.io.load_inference_model to obtain the inference program desc,
256-
# the feed_target_names (the names of variables that will be feeded
257-
# data using feed operators), and the fetch_targets (variables that
258-
# we want to obtain data from using fetch operators).
259-
[inference_program, feed_target_names,
260-
fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)
261-
262-
lod = [0, 4, 10]
263-
word = create_random_lodtensor(lod, place, low=0, high=word_dict_len - 1)
264-
pred = create_random_lodtensor(lod, place, low=0, high=pred_dict_len - 1)
265-
ctx_n2 = create_random_lodtensor(lod, place, low=0, high=word_dict_len - 1)
266-
ctx_n1 = create_random_lodtensor(lod, place, low=0, high=word_dict_len - 1)
267-
ctx_0 = create_random_lodtensor(lod, place, low=0, high=word_dict_len - 1)
268-
ctx_p1 = create_random_lodtensor(lod, place, low=0, high=word_dict_len - 1)
269-
ctx_p2 = create_random_lodtensor(lod, place, low=0, high=word_dict_len - 1)
270-
mark = create_random_lodtensor(lod, place, low=0, high=mark_dict_len - 1)
271-
272-
# Construct feed as a dictionary of {feed_target_name: feed_target_data}
273-
# and results will contain a list of data corresponding to fetch_targets.
274-
assert feed_target_names[0] == 'word_data'
275-
assert feed_target_names[1] == 'verb_data'
276-
assert feed_target_names[2] == 'ctx_n2_data'
277-
assert feed_target_names[3] == 'ctx_n1_data'
278-
assert feed_target_names[4] == 'ctx_0_data'
279-
assert feed_target_names[5] == 'ctx_p1_data'
280-
assert feed_target_names[6] == 'ctx_p2_data'
281-
assert feed_target_names[7] == 'mark_data'
282-
283-
results = exe.run(inference_program,
284-
feed={
285-
feed_target_names[0]: word,
286-
feed_target_names[1]: pred,
287-
feed_target_names[2]: ctx_n2,
288-
feed_target_names[3]: ctx_n1,
289-
feed_target_names[4]: ctx_0,
290-
feed_target_names[5]: ctx_p1,
291-
feed_target_names[6]: ctx_p2,
292-
feed_target_names[7]: mark
293-
},
294-
fetch_list=fetch_targets,
295-
return_numpy=False)
296-
print(results[0].lod())
297-
np_data = np.array(results[0])
298-
print("Inference Shape: ", np_data.shape)
255+
inference_scope = fluid.core.Scope()
256+
with fluid.scope_guard(inference_scope):
257+
# Use fluid.io.load_inference_model to obtain the inference program desc,
258+
# the feed_target_names (the names of variables that will be fed
259+
# data using feed operators), and the fetch_targets (variables that
260+
# we want to obtain data from using fetch operators).
261+
[inference_program, feed_target_names,
262+
fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)
263+
264+
lod = [0, 4, 10]
265+
word = create_random_lodtensor(
266+
lod, place, low=0, high=word_dict_len - 1)
267+
pred = create_random_lodtensor(
268+
lod, place, low=0, high=pred_dict_len - 1)
269+
ctx_n2 = create_random_lodtensor(
270+
lod, place, low=0, high=word_dict_len - 1)
271+
ctx_n1 = create_random_lodtensor(
272+
lod, place, low=0, high=word_dict_len - 1)
273+
ctx_0 = create_random_lodtensor(
274+
lod, place, low=0, high=word_dict_len - 1)
275+
ctx_p1 = create_random_lodtensor(
276+
lod, place, low=0, high=word_dict_len - 1)
277+
ctx_p2 = create_random_lodtensor(
278+
lod, place, low=0, high=word_dict_len - 1)
279+
mark = create_random_lodtensor(
280+
lod, place, low=0, high=mark_dict_len - 1)
281+
282+
# Construct feed as a dictionary of {feed_target_name: feed_target_data}
283+
# and results will contain a list of data corresponding to fetch_targets.
284+
assert feed_target_names[0] == 'word_data'
285+
assert feed_target_names[1] == 'verb_data'
286+
assert feed_target_names[2] == 'ctx_n2_data'
287+
assert feed_target_names[3] == 'ctx_n1_data'
288+
assert feed_target_names[4] == 'ctx_0_data'
289+
assert feed_target_names[5] == 'ctx_p1_data'
290+
assert feed_target_names[6] == 'ctx_p2_data'
291+
assert feed_target_names[7] == 'mark_data'
292+
293+
results = exe.run(inference_program,
294+
feed={
295+
feed_target_names[0]: word,
296+
feed_target_names[1]: pred,
297+
feed_target_names[2]: ctx_n2,
298+
feed_target_names[3]: ctx_n1,
299+
feed_target_names[4]: ctx_0,
300+
feed_target_names[5]: ctx_p1,
301+
feed_target_names[6]: ctx_p2,
302+
feed_target_names[7]: mark
303+
},
304+
fetch_list=fetch_targets,
305+
return_numpy=False)
306+
print(results[0].lod())
307+
np_data = np.array(results[0])
308+
print("Inference Shape: ", np_data.shape)
299309

300310

301311
def main(use_cuda):

python/paddle/v2/fluid/tests/book/test_recognize_digits.py

Lines changed: 21 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -165,25 +165,27 @@ def infer(use_cuda, save_dirname=None, param_filename=None):
165165
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
166166
exe = fluid.Executor(place)
167167

168-
# Use fluid.io.load_inference_model to obtain the inference program desc,
169-
# the feed_target_names (the names of variables that will be feeded
170-
# data using feed operators), and the fetch_targets (variables that
171-
# we want to obtain data from using fetch operators).
172-
[inference_program, feed_target_names, fetch_targets
173-
] = fluid.io.load_inference_model(save_dirname, exe, param_filename)
174-
175-
# The input's dimension of conv should be 4-D or 5-D.
176-
# Use normilized image pixels as input data, which should be in the range [-1.0, 1.0].
177-
batch_size = 1
178-
tensor_img = numpy.random.uniform(-1.0, 1.0,
179-
[batch_size, 1, 28, 28]).astype("float32")
180-
181-
# Construct feed as a dictionary of {feed_target_name: feed_target_data}
182-
# and results will contain a list of data corresponding to fetch_targets.
183-
results = exe.run(inference_program,
184-
feed={feed_target_names[0]: tensor_img},
185-
fetch_list=fetch_targets)
186-
print("infer results: ", results[0])
168+
inference_scope = fluid.core.Scope()
169+
with fluid.scope_guard(inference_scope):
170+
# Use fluid.io.load_inference_model to obtain the inference program desc,
171+
# the feed_target_names (the names of variables that will be fed
172+
# data using feed operators), and the fetch_targets (variables that
173+
# we want to obtain data from using fetch operators).
174+
[inference_program, feed_target_names, fetch_targets
175+
] = fluid.io.load_inference_model(save_dirname, exe, param_filename)
176+
177+
# The input's dimension of conv should be 4-D or 5-D.
178+
# Use normalized image pixels as input data, which should be in the range [-1.0, 1.0].
179+
batch_size = 1
180+
tensor_img = numpy.random.uniform(
181+
-1.0, 1.0, [batch_size, 1, 28, 28]).astype("float32")
182+
183+
# Construct feed as a dictionary of {feed_target_name: feed_target_data}
184+
# and results will contain a list of data corresponding to fetch_targets.
185+
results = exe.run(inference_program,
186+
feed={feed_target_names[0]: tensor_img},
187+
fetch_list=fetch_targets)
188+
print("infer results: ", results[0])
187189

188190

189191
def main(use_cuda, parallel, nn_type, combine):

0 commit comments

Comments
 (0)