Skip to content

Commit dbf0798

Browse files
authored
Merge pull request #13498 from luotao1/for_test
use clone(for_test=True) to replace get_inference_program
2 parents fc44087 + 618944a commit dbf0798

File tree

4 files changed

+10
-32
lines changed

4 files changed

+10
-32
lines changed

paddle/fluid/API.spec

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -73,7 +73,6 @@ paddle.fluid.io.load_params ArgSpec(args=['executor', 'dirname', 'main_program',
7373
paddle.fluid.io.load_persistables ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None))
7474
paddle.fluid.io.save_inference_model ArgSpec(args=['dirname', 'feeded_var_names', 'target_vars', 'executor', 'main_program', 'model_filename', 'params_filename', 'export_for_deployment'], varargs=None, keywords=None, defaults=(None, None, None, True))
7575
paddle.fluid.io.load_inference_model ArgSpec(args=['dirname', 'executor', 'model_filename', 'params_filename', 'pserver_endpoints'], varargs=None, keywords=None, defaults=(None, None, None))
76-
paddle.fluid.io.get_inference_program ArgSpec(args=['target_vars', 'main_program'], varargs=None, keywords=None, defaults=(None,))
7776
paddle.fluid.initializer.ConstantInitializer.__init__ ArgSpec(args=['self', 'value', 'force_cpu'], varargs=None, keywords=None, defaults=(0.0, False))
7877
paddle.fluid.initializer.UniformInitializer.__init__ ArgSpec(args=['self', 'low', 'high', 'seed'], varargs=None, keywords=None, defaults=(-1.0, 1.0, 0))
7978
paddle.fluid.initializer.NormalInitializer.__init__ ArgSpec(args=['self', 'loc', 'scale', 'seed'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0))

python/paddle/fluid/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@
4646
from .param_attr import ParamAttr, WeightNormParamAttr
4747
from .data_feeder import DataFeeder
4848
from .core import LoDTensor, LoDTensorArray, CPUPlace, CUDAPlace, CUDAPinnedPlace, Scope
49-
from .transpiler import DistributeTranspiler, InferenceTranspiler, \
49+
from .transpiler import DistributeTranspiler, \
5050
memory_optimize, release_memory, DistributeTranspilerConfig
5151
from .lod_tensor import create_lod_tensor, create_random_int_lodtensor
5252
from . import clip

python/paddle/fluid/io.py

Lines changed: 1 addition & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -27,8 +27,7 @@
2727

2828
__all__ = [
2929
'save_vars', 'save_params', 'save_persistables', 'load_vars', 'load_params',
30-
'load_persistables', 'save_inference_model', 'load_inference_model',
31-
'get_inference_program'
30+
'load_persistables', 'save_inference_model', 'load_inference_model'
3231
]
3332

3433

@@ -504,23 +503,6 @@ def load_persistables(executor, dirname, main_program=None, filename=None):
504503
filename=filename)
505504

506505

507-
def get_inference_program(target_vars, main_program=None):
508-
if main_program is None:
509-
main_program = default_main_program()
510-
if not isinstance(target_vars, list):
511-
target_vars = [target_vars]
512-
vars = []
513-
for var in target_vars:
514-
if isinstance(var, Evaluator):
515-
vars.extend(var.states)
516-
vars.extend(var.metrics)
517-
else:
518-
vars.append(var)
519-
pruned_program = main_program._prune(targets=vars)
520-
inference_program = pruned_program._inference_optimize()
521-
return inference_program
522-
523-
524506
def prepend_feed_ops(inference_program,
525507
feed_target_names,
526508
feed_holder_name='feed'):

python/paddle/fluid/tests/unittests/dist_transformer.py

Lines changed: 8 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -437,13 +437,8 @@ def split_data(data, num_part):
437437
]
438438

439439

440-
def test_context(train_progm, avg_cost, train_exe, dev_count, data_input_names,
440+
def test_context(test_program, avg_cost, train_exe, dev_count, data_input_names,
441441
sum_cost, token_num):
442-
# Context to do validation.
443-
test_program = train_progm.clone()
444-
with fluid.program_guard(test_program):
445-
test_program = fluid.io.get_inference_program([avg_cost])
446-
447442
val_data = DataReader(
448443
src_vocab_fpath=TrainTaskConfig.src_vocab_fpath,
449444
trg_vocab_fpath=TrainTaskConfig.trg_vocab_fpath,
@@ -505,7 +500,7 @@ def test(exe=test_exe):
505500

506501

507502
def train_loop(exe, train_progm, dev_count, sum_cost, avg_cost, lr_scheduler,
508-
token_num, predict):
503+
token_num, predict, test_program):
509504
# Initialize the parameters.
510505
if TrainTaskConfig.ckpt_path:
511506
lr_scheduler.current_steps = TrainTaskConfig.start_step
@@ -554,7 +549,7 @@ def train_loop(exe, train_progm, dev_count, sum_cost, avg_cost, lr_scheduler,
554549
-1] + label_data_input_fields
555550

556551
if TrainTaskConfig.val_file_pattern is not None:
557-
test = test_context(train_progm, avg_cost, train_exe, dev_count,
552+
test = test_context(test_program, avg_cost, train_exe, dev_count,
558553
data_input_names, sum_cost, token_num)
559554

560555
# the best cross-entropy value with label smoothing
@@ -1647,6 +1642,8 @@ def get_model(is_dist, is_async):
16471642
local_lr_scheduler = LearningRateScheduler(ModelHyperParams.d_model,
16481643
TrainTaskConfig.warmup_steps,
16491644
TrainTaskConfig.learning_rate)
1645+
# Context to do validation.
1646+
test_program = fluid.default_main_program().clone(for_test=True)
16501647

16511648
if not is_dist:
16521649
optimizer = fluid.optimizer.Adam(
@@ -1671,7 +1668,7 @@ def get_model(is_dist, is_async):
16711668
epsilon=TrainTaskConfig.eps)
16721669
optimizer.minimize(sum_cost)
16731670

1674-
return sum_cost, avg_cost, predict, token_num, local_lr_scheduler
1671+
return sum_cost, avg_cost, predict, token_num, local_lr_scheduler, test_program
16751672

16761673

16771674
def update_args():
@@ -1705,7 +1702,7 @@ def run_pserver(self, args):
17051702
def run_trainer(self, use_cuda, args):
17061703
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
17071704
TrainTaskConfig.use_gpu = use_cuda
1708-
sum_cost, avg_cost, predict, token_num, local_lr_scheduler = get_model(
1705+
sum_cost, avg_cost, predict, token_num, local_lr_scheduler, test_program = get_model(
17091706
args.is_dist, not args.sync_mode)
17101707

17111708
if args.is_dist:
@@ -1726,7 +1723,7 @@ def run_trainer(self, use_cuda, args):
17261723
TrainTaskConfig.local = not args.is_dist
17271724

17281725
train_loop(startup_exe, trainer_prog, 1, sum_cost, avg_cost,
1729-
local_lr_scheduler, token_num, predict)
1726+
local_lr_scheduler, token_num, predict, test_program)
17301727

17311728

17321729
if __name__ == "__main__":

0 commit comments

Comments
 (0)