Commit 6a79ba2

jacquesqiao authored and luotao1 committed
Add save inference model to trainer (#12682)
* add save_inference_model for Trainer
* add comment
* update comment
* fix mac compile
* add unit test
* update API.spec
* revert cpplint-cpp-source
1 parent 01b6895 commit 6a79ba2

File tree

3 files changed: +69 -4 lines

paddle/fluid/API.spec

Lines changed: 1 addition & 0 deletions
@@ -43,6 +43,7 @@ paddle.fluid.Executor.run ArgSpec(args=['self', 'program', 'feed', 'fetch_list',
 paddle.fluid.global_scope ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
 paddle.fluid.scope_guard ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
 paddle.fluid.Trainer.__init__ ArgSpec(args=['self', 'train_func', 'optimizer_func', 'param_path', 'place', 'parallel', 'checkpoint_config'], varargs=None, keywords=None, defaults=(None, None, False, None))
+paddle.fluid.Trainer.save_inference_model ArgSpec(args=['self', 'param_path', 'feeded_var_names', 'target_var_indexes'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.Trainer.save_params ArgSpec(args=['self', 'param_path'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.Trainer.stop ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.Trainer.test ArgSpec(args=['self', 'reader', 'feed_order'], varargs=None, keywords=None, defaults=None)
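For reference, the new spec entry corresponds to a call of the following shape. This is only a minimal sketch: the trainer variable is assumed to be a fluid.Trainer instance, and the directory name and index values are borrowed from the fit_a_line test below, not from the spec itself.

    # The ArgSpec lists args=['self', 'param_path', 'feeded_var_names', 'target_var_indexes']
    # with no varargs, keywords, or defaults, so all three arguments must be supplied.
    trainer.save_inference_model(
        "fit_a_line.inference_model",  # param_path: directory the inference model is written to
        ['x'],                         # feeded_var_names: names of the input variables to feed
        [1])                           # target_var_indexes: indexes into the train_func outputs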

python/paddle/fluid/tests/book/high-level-api/fit_a_line/test_fit_a_line.py

Lines changed: 46 additions & 4 deletions
@@ -47,14 +47,14 @@ def train_program():
     loss = fluid.layers.square_error_cost(input=y_predict, label=y)
     avg_loss = fluid.layers.mean(loss)
 
-    return avg_loss
+    return [avg_loss, y_predict]
 
 
 def optimizer_func():
     return fluid.optimizer.SGD(learning_rate=0.001)
 
 
-def train(use_cuda, train_program, params_dirname):
+def train(use_cuda, train_program, params_dirname, inference_model_dirname):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
 
     trainer = fluid.Trainer(
@@ -74,6 +74,8 @@ def event_handler(event):
                 '''
                 if params_dirname is not None:
                     trainer.save_params(params_dirname)
+                    trainer.save_inference_model(inference_model_dirname,
+                                                 ['x'], [1])
                 trainer.stop()
 
     trainer.train(
@@ -99,15 +101,55 @@ def infer(use_cuda, inference_program, params_dirname=None):
     print("infer results: ", results[0])
 
 
+def infer_by_saved_model(use_cuda, save_dirname=None):
+    if save_dirname is None:
+        return
+
+    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+    exe = fluid.Executor(place)
+
+    inference_scope = fluid.core.Scope()
+    with fluid.scope_guard(inference_scope):
+        # Use fluid.io.load_inference_model to obtain the inference program desc,
+        # the feed_target_names (the names of variables that will be fed
+        # data using feed operators), and the fetch_targets (variables that
+        # we want to obtain data from using fetch operators).
+        [inference_program, feed_target_names,
+         fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)
+
+        # The input's dimension should be 2-D and the second dim is 13
+        # The input data should be >= 0
+        batch_size = 10
+
+        test_reader = paddle.batch(
+            paddle.dataset.uci_housing.test(), batch_size=batch_size)
+
+        test_data = next(test_reader())
+        test_feat = numpy.array(
+            [data[0] for data in test_data]).astype("float32")
+        test_label = numpy.array(
+            [data[1] for data in test_data]).astype("float32")
+
+        assert feed_target_names[0] == 'x'
+        results = exe.run(inference_program,
+                          feed={feed_target_names[0]: numpy.array(test_feat)},
+                          fetch_list=fetch_targets)
+        print("infer shape: ", results[0].shape)
+        print("infer results: ", results[0])
+        print("ground truth: ", test_label)
+
+
 def main(use_cuda):
     if use_cuda and not fluid.core.is_compiled_with_cuda():
         return
 
     # Directory for saving the trained model
-    params_dirname = "fit_a_line.inference.model"
+    params_dirname = "fit_a_line.model"
+    inference_model_dirname = "fit_a_line.inference_model"
 
-    train(use_cuda, train_program, params_dirname)
+    train(use_cuda, train_program, params_dirname, inference_model_dirname)
     infer(use_cuda, inference_program, params_dirname)
+    infer_by_saved_model(use_cuda, inference_model_dirname)
 
 
 class TestFitALine(unittest.TestCase):
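To make the link between the two halves of the test explicit: the [1] passed as target_var_indexes in train() indexes into the list returned by train_program(), and the feed name 'x' comes back from fluid.io.load_inference_model as feed_target_names. A minimal sketch of that mapping, using names from the diff above (the list of strings is purely illustrative):

    # train_program() now returns [avg_loss, y_predict]; target_var_indexes
    # index into that list, so [1] selects y_predict as the fetch target.
    train_func_outputs = ["avg_loss", "y_predict"]   # illustrative names only
    target_var_indexes = [1]
    assert [train_func_outputs[i] for i in target_var_indexes] == ["y_predict"]
    # When the model is loaded back, load_inference_model returns the saved feed
    # names, which is why infer_by_saved_model asserts feed_target_names[0] == 'x'.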

python/paddle/fluid/trainer.py

Lines changed: 22 additions & 0 deletions
@@ -431,6 +431,28 @@ def save_params(self, param_path):
         exe = executor.Executor(self.place)
         io.save_persistables(exe, dirname=param_path)
 
+    def save_inference_model(self, param_path, feeded_var_names,
+                             target_var_indexes):
+        """
+        Save the model for C++ inference into :code:`param_path`.
+
+        Args:
+            param_path(str): The path to save the inference model.
+            feeded_var_names(list(str)): The names of the variables that must
+                be fed before the program is run.
+            target_var_indexes(list(int)): The indexes of the target variables
+                within the list returned by the trainer's train_func.
+        Returns:
+            None
+        """
+        with self._prog_and_scope_guard():
+            exe = executor.Executor(self.place)
+            target_vars = [
+                self.train_func_outputs[index] for index in target_var_indexes
+            ]
+            io.save_inference_model(param_path, feeded_var_names, target_vars,
+                                    exe)
+
     @contextlib.contextmanager
     def _prog_and_scope_guard(self):
         with framework.program_guard(
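Taken together with the fit_a_line test above, a typical use of the new method looks roughly like the sketch below. This is a non-authoritative sketch assuming the high-level fluid.Trainer API of this release; the network and directory names are borrowed from the test, and the actual training call is elided.

    import paddle.fluid as fluid

    def train_program():
        # Same toy regression network as in test_fit_a_line.py.
        x = fluid.layers.data(name='x', shape=[13], dtype='float32')
        y = fluid.layers.data(name='y', shape=[1], dtype='float32')
        y_predict = fluid.layers.fc(input=x, size=1, act=None)
        avg_loss = fluid.layers.mean(
            fluid.layers.square_error_cost(input=y_predict, label=y))
        # Returning both outputs lets save_inference_model pick y_predict by index.
        return [avg_loss, y_predict]

    def optimizer_func():
        return fluid.optimizer.SGD(learning_rate=0.001)

    trainer = fluid.Trainer(
        train_func=train_program,
        optimizer_func=optimizer_func,
        place=fluid.CPUPlace())

    # ... run trainer.train(...) with a reader and event handler, as in the test ...

    # Feed variable 'x'; fetch the train_func output at index 1 (y_predict).
    trainer.save_inference_model("fit_a_line.inference_model", ['x'], [1])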
