Skip to content

Commit feed94e

Browse files
committed
should load parameters before creating parallel_executor
1 parent e8d24aa commit feed94e

File tree

5 files changed

+29
-24
lines changed

5 files changed

+29
-24
lines changed

python/paddle/fluid/inferencer.py

Lines changed: 20 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,8 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414

15+
import contextlib
16+
1517
import core
1618

1719
import executor
@@ -41,31 +43,36 @@ def __init__(self, infer_func, param_path, place=None, parallel=False):
4143
with unique_name.guard():
4244
self.predict_var = infer_func()
4345

46+
with self._prog_and_scope_guard():
47+
# load params from param_path into scope
48+
io.load_params(executor.Executor(self.place), param_path)
49+
4450
if parallel:
45-
self.exe = parallel_executor.ParallelExecutor(
46-
use_cuda=isinstance(self.place, core.CUDAPlace),
47-
loss_name=self.predict_var.name)
51+
with self._prog_and_scope_guard():
52+
self.exe = parallel_executor.ParallelExecutor(
53+
use_cuda=isinstance(self.place, core.CUDAPlace),
54+
loss_name=self.predict_var.name)
4855
else:
4956
self.exe = executor.Executor(self.place)
50-
with executor.scope_guard(self.scope):
51-
# load params from param_path into scope
52-
io.load_params(self.exe, param_path, self.inference_program)
5357

54-
def infer(self, inputs, return_numpy=True):
58+
def infer(self, inputs):
5559
"""
5660
:param inputs: a map of {"input_name": input_var} that will be feed into the inference program
5761
to get the predict value
58-
:param return_numpy: if return numpy value for row tensor
5962
:return: the predict value of the inference model
6063
"""
6164
if not isinstance(inputs, dict):
6265
raise ValueError(
6366
"inputs should be a map of {'input_name': input_var}")
6467

65-
with executor.scope_guard(self.scope):
66-
results = self.exe.run(self.inference_program,
67-
feed=inputs,
68-
fetch_list=[self.predict_var],
69-
return_numpy=return_numpy)
68+
with self._prog_and_scope_guard():
69+
results = self.exe.run(feed=inputs,
70+
fetch_list=[self.predict_var.name])
7071

7172
return results
73+
74+
@contextlib.contextmanager
75+
def _prog_and_scope_guard(self):
76+
with framework.program_guard(main_program=self.inference_program):
77+
with executor.scope_guard(self.scope):
78+
yield

python/paddle/fluid/tests/book/high-level-api/fit_a_line/test_fit_a_line.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -94,7 +94,7 @@ def infer(use_cuda, inference_program, save_dirname=None):
9494
tensor_x = numpy.random.uniform(0, 10, [batch_size, 13]).astype("float32")
9595

9696
results = inferencer.infer({'x': tensor_x})
97-
print("infer results: ", results[0])
97+
print("infer results: ", numpy.array(results[0]))
9898

9999

100100
def main(use_cuda):

python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_conv.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -118,7 +118,7 @@ def infer(use_cuda, inference_program, save_dirname=None):
118118

119119
results = inferencer.infer({'img': tensor_img})
120120

121-
print("infer results: ", results[0])
121+
print("infer results: ", numpy.array(results[0]))
122122

123123

124124
def main(use_cuda):

python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_mlp.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -99,7 +99,7 @@ def infer(use_cuda, inference_program, save_dirname=None):
9999

100100
results = inferencer.infer({'img': tensor_img})
101101

102-
print("infer results: ", results[0])
102+
print("infer results: ", numpy.array(results[0]))
103103

104104

105105
def main(use_cuda):

python/paddle/fluid/tests/book/high-level-api/word2vec/no_test_word2vec_new_api.py

Lines changed: 6 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -127,14 +127,12 @@ def infer(use_cuda, inference_program, save_path):
127127
third_word = create_random_lodtensor(lod, place, low=0, high=dict_size - 1)
128128
fourth_word = create_random_lodtensor(lod, place, low=0, high=dict_size - 1)
129129

130-
result = inferencer.infer(
131-
{
132-
'firstw': first_word,
133-
'secondw': second_word,
134-
'thirdw': third_word,
135-
'forthw': fourth_word
136-
},
137-
return_numpy=False)
130+
result = inferencer.infer({
131+
'firstw': first_word,
132+
'secondw': second_word,
133+
'thirdw': third_word,
134+
'forthw': fourth_word
135+
})
138136
print(np.array(result[0]))
139137

140138

0 commit comments

Comments
 (0)