Skip to content

Commit 0815291

Browse files
committed
add annotation in load_inference_model
1 parent f6b06bd commit 0815291

File tree

1 file changed

+8
-0
lines changed

1 file changed

+8
-0
lines changed

python/paddle/fluid/io.py

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -691,6 +691,10 @@ def load_inference_model(dirname,
691691
parameters were saved in a single binary
692692
file. If parameters were saved in separate
693693
files, set it as 'None'.
694+
pserver_endpoints(list|None): This is only needed for distributed inference.
695+
When a distributed lookup table is used in training,
696+
it is also needed in inference. The parameter is
697+
a list of pserver endpoints.
694698
695699
Returns:
696700
tuple: The return of this function is a tuple with three elements:
@@ -709,12 +713,16 @@ def load_inference_model(dirname,
709713
710714
exe = fluid.Executor(fluid.CPUPlace())
711715
path = "./infer_model"
716+
endpoints = ["127.0.0.1:2023","127.0.0.1:2024"]
712717
[inference_program, feed_target_names, fetch_targets] =
713718
fluid.io.load_inference_model(dirname=path, executor=exe)
714719
results = exe.run(inference_program,
715720
feed={feed_target_names[0]: tensor_img},
716721
fetch_list=fetch_targets)
717722
723+
# if we need lookup table, we will use:
724+
fluid.io.load_inference_model(dirname=path, executor=exe, pserver_endpoints=endpoints)
725+
718726
# In this example, the inference program was saved in the
719727
# "./infer_model/__model__" and parameters were saved in
720728
# separate files in "./infer_model".

0 commit comments

Comments
 (0)