@@ -691,6 +691,10 @@ def load_inference_model(dirname,
                                      parameters were saved in a single binary
                                      file. If parameters were saved in separate
                                      files, set it as 'None'.
+        pserver_endpoints(list|None): This is only needed for distributed inference.
+                                      When a distributed lookup table was used during
+                                      training, it is also needed at inference time.
+                                      The parameter is a list of pserver endpoints.

    Returns:
        tuple: The return of this function is a tuple with three elements:
@@ -709,12 +713,16 @@ def load_inference_model(dirname,

            exe = fluid.Executor(fluid.CPUPlace())
            path = "./infer_model"
+           endpoints = ["127.0.0.1:2023","127.0.0.1:2024"]
            [inference_program, feed_target_names, fetch_targets] =
                fluid.io.load_inference_model(dirname=path, executor=exe)
            results = exe.run(inference_program,
                              feed={feed_target_names[0]: tensor_img},
                              fetch_list=fetch_targets)

+           # If the model uses a distributed lookup table, pass the pserver endpoints:
+           fluid.io.load_inference_model(dirname=path, executor=exe, pserver_endpoints=endpoints)
+
            # In this example, the inference program was saved in the
            # "./infer_model/__model__" and parameters were saved in
            # separate files in "./infer_model".
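
For reference, a minimal end-to-end sketch of how the new parameter could be used is shown below. It assumes this change is merged, that a model was already saved to "./infer_model" with fluid.io.save_inference_model, that parameter servers hosting the distributed lookup table are reachable at the listed endpoints, and that the input shape (1, 784) is only a placeholder; adjust all of these for the real model.

    import numpy as np
    import paddle.fluid as fluid

    exe = fluid.Executor(fluid.CPUPlace())
    path = "./infer_model"
    # Placeholder endpoints; use the pservers that served the lookup table in training.
    endpoints = ["127.0.0.1:2023", "127.0.0.1:2024"]

    # Load the inference program; pserver_endpoints (the parameter added in this
    # change) tells it where the distributed lookup table parameters live.
    inference_program, feed_target_names, fetch_targets = \
        fluid.io.load_inference_model(dirname=path,
                                      executor=exe,
                                      pserver_endpoints=endpoints)

    # Placeholder input; the real shape depends on the saved model.
    tensor_img = np.random.rand(1, 784).astype("float32")
    results = exe.run(inference_program,
                      feed={feed_target_names[0]: tensor_img},
                      fetch_list=fetch_targets)

The reason for the extra argument, as the docstring addition describes, is that a distributed lookup table keeps its parameters on the pservers rather than in the saved model files, so inference needs the endpoint list to reach those parameters.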