
Commit f1cbb4b

Merge pull request #13479 from luotao1/inference_transpiler
Hide InferenceTranspiler from the public API
2 parents: 23ba766 + ff37993

File tree

3 files changed (+3, -7 lines)

paddle/fluid/API.spec

Lines changed: 0 additions & 4 deletions
@@ -54,8 +54,6 @@ paddle.fluid.DistributeTranspiler.get_pserver_programs ArgSpec(args=['self', 'en
 paddle.fluid.DistributeTranspiler.get_startup_program ArgSpec(args=['self', 'endpoint', 'pserver_program', 'startup_program'], varargs=None, keywords=None, defaults=(None, None))
 paddle.fluid.DistributeTranspiler.get_trainer_program ArgSpec(args=['self', 'wait_port'], varargs=None, keywords=None, defaults=(True,))
 paddle.fluid.DistributeTranspiler.transpile ArgSpec(args=['self', 'trainer_id', 'program', 'pservers', 'trainers', 'sync_mode', 'startup_program'], varargs=None, keywords=None, defaults=(None, '127.0.0.1:6174', 1, True, None))
-paddle.fluid.InferenceTranspiler.__init__
-paddle.fluid.InferenceTranspiler.transpile ArgSpec(args=['self', 'program', 'place', 'scope'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.memory_optimize ArgSpec(args=['input_program', 'skip_opt_set', 'print_log', 'level'], varargs=None, keywords=None, defaults=(None, False, 0))
 paddle.fluid.release_memory ArgSpec(args=['input_program', 'skip_opt_set'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.DistributeTranspilerConfig.__init__
@@ -338,8 +336,6 @@ paddle.fluid.transpiler.DistributeTranspiler.get_pserver_programs ArgSpec(args=[
 paddle.fluid.transpiler.DistributeTranspiler.get_startup_program ArgSpec(args=['self', 'endpoint', 'pserver_program', 'startup_program'], varargs=None, keywords=None, defaults=(None, None))
 paddle.fluid.transpiler.DistributeTranspiler.get_trainer_program ArgSpec(args=['self', 'wait_port'], varargs=None, keywords=None, defaults=(True,))
 paddle.fluid.transpiler.DistributeTranspiler.transpile ArgSpec(args=['self', 'trainer_id', 'program', 'pservers', 'trainers', 'sync_mode', 'startup_program'], varargs=None, keywords=None, defaults=(None, '127.0.0.1:6174', 1, True, None))
-paddle.fluid.transpiler.InferenceTranspiler.__init__
-paddle.fluid.transpiler.InferenceTranspiler.transpile ArgSpec(args=['self', 'program', 'place', 'scope'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.transpiler.memory_optimize ArgSpec(args=['input_program', 'skip_opt_set', 'print_log', 'level'], varargs=None, keywords=None, defaults=(None, False, 0))
 paddle.fluid.transpiler.release_memory ArgSpec(args=['input_program', 'skip_opt_set'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.transpiler.HashName.__init__ ArgSpec(args=['self', 'pserver_endpoints'], varargs=None, keywords=None, defaults=None)

python/paddle/fluid/tests/book/test_image_classification.py

Lines changed: 1 addition & 1 deletion
@@ -223,7 +223,7 @@ def infer(use_cuda, save_dirname=None):

         # Use inference_transpiler to speedup
         inference_transpiler_program = inference_program.clone()
-        t = fluid.InferenceTranspiler()
+        t = fluid.transpiler.InferenceTranspiler()
         t.transpile(inference_transpiler_program, place)

         # Construct feed as a dictionary of {feed_target_name: feed_target_data}

python/paddle/fluid/transpiler/__init__.py

Lines changed: 2 additions & 2 deletions
@@ -20,6 +20,6 @@
 from .ps_dispatcher import HashName, RoundRobin

 __all__ = [
-    "DistributeTranspiler", "InferenceTranspiler", "memory_optimize",
-    "release_memory", "HashName", "RoundRobin", "DistributeTranspilerConfig"
+    "DistributeTranspiler", "memory_optimize", "release_memory", "HashName",
+    "RoundRobin", "DistributeTranspilerConfig"
 ]
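
With InferenceTranspiler no longer exported from the top-level fluid namespace, callers reach it through the paddle.fluid.transpiler submodule, as the updated test above does. Below is a minimal usage sketch of that path; the save_dirname value and the surrounding load_inference_model setup are illustrative assumptions borrowed from the test, not part of this commit.

import paddle.fluid as fluid

place = fluid.CPUPlace()
exe = fluid.Executor(place)

# Hypothetical directory holding a previously saved inference model;
# loading it only sets the stage and is unrelated to this change.
save_dirname = "image_classification.inference.model"
[inference_program, feed_target_names,
 fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)

# Clone the program and transpile the copy, using the submodule path
# that remains available after InferenceTranspiler was hidden.
inference_transpiler_program = inference_program.clone()
t = fluid.transpiler.InferenceTranspiler()  # not fluid.InferenceTranspiler()
t.transpile(inference_transpiler_program, place)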
