 import executor
 import framework
 import io
+import parallel_executor
 import unique_name
 from trainer import check_and_get_place

 __all__ = ['Inferencer', ]


 class Inferencer(object):
-    def __init__(self, infer_func, param_path, place=None):
+    def __init__(self, infer_func, param_path, place=None, parallel=False):
         """
         :param infer_func: a function that will return predict Variable
         :param param_path: the path where the inference model is saved by fluid.io.save_params
         :param place: place to do the inference
+        :param parallel: if True, run the inference with ParallelExecutor
         """
         self.param_path = param_path
         self.scope = core.Scope()
+        self.parallel = parallel
+        self.place = check_and_get_place(place)

         self.inference_program = framework.Program()
         with framework.program_guard(self.inference_program):
             with unique_name.guard():
                 self.predict_var = infer_func()

-        self.exe = executor.Executor(check_and_get_place(place))
+        if parallel:
+            self.exe = parallel_executor.ParallelExecutor(
+                use_cuda=isinstance(self.place, core.CUDAPlace),
+                loss_name=self.predict_var.name)
+        else:
+            self.exe = executor.Executor(self.place)
         with executor.scope_guard(self.scope):
             # load params from param_path into scope
             io.load_params(self.exe, param_path, self.inference_program)
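For reviewers, a minimal usage sketch of the new `parallel` switch. Assumptions are flagged: it presumes this class is exposed as `fluid.Inferencer` with an `infer` method that takes a feed dict (as in the high-level API examples of this era); the network in `inference_program` and the `param_path` value are placeholders, not part of this PR:

import numpy
import paddle.fluid as fluid

def inference_program():
    # Placeholder predict network: a single fully-connected layer.
    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    return fluid.layers.fc(input=x, size=1, act=None)

# parallel=True takes the new ParallelExecutor branch; use_cuda is
# derived inside __init__ from the resolved place, so a CUDAPlace
# runs across the visible GPUs while a CPUPlace stays on CPU.
inferencer = fluid.Inferencer(
    infer_func=inference_program,
    param_path='fit_a_line.model',  # placeholder: a directory written by fluid.io.save_params
    place=fluid.CPUPlace(),
    parallel=True)

batch = numpy.random.uniform(-1, 1, size=(10, 13)).astype('float32')
results = inferencer.infer({'x': batch})

Note the design choice in the diff: callers pick a device once via `place`, and the executor kind (plain `Executor` vs. `ParallelExecutor`) is selected by the `parallel` flag rather than by a second device argument.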