Commit f372f27

Hidden APIs for While, StaticRNN, ParallelDo. (#12332)
* Hidden APIs for While, StaticRNN, ParallelDo.
1 parent 4b8ae52 · commit f372f27

File tree

2 files changed (+14 -13 lines)

paddle/fluid/API.spec

Lines changed: 1 addition & 4 deletions
@@ -170,6 +170,7 @@ paddle.fluid.layers.mean_iou ArgSpec(args=['input', 'label', 'num_classes'], var
 paddle.fluid.layers.relu ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.log ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.crop ArgSpec(args=['x', 'shape', 'offsets', 'name'], varargs=None, keywords=None, defaults=(None, None, None))
+paddle.fluid.layers.rank_loss ArgSpec(args=['label', 'left', 'right', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.data ArgSpec(args=['name', 'shape', 'append_batch_size', 'dtype', 'lod_level', 'type', 'stop_gradient'], varargs=None, keywords=None, defaults=(True, 'float32', 0, VarType.LOD_TENSOR, True))
 paddle.fluid.layers.open_recordio_file ArgSpec(args=['filename', 'shapes', 'lod_levels', 'dtypes', 'pass_num', 'for_parallel'], varargs=None, keywords=None, defaults=(1, True))
 paddle.fluid.layers.open_files ArgSpec(args=['filenames', 'shapes', 'lod_levels', 'dtypes', 'thread_num', 'buffer_size', 'pass_num', 'is_test'], varargs=None, keywords=None, defaults=(None, None, 1, None))
@@ -201,7 +202,6 @@ paddle.fluid.layers.zeros ArgSpec(args=['shape', 'dtype', 'force_cpu'], varargs=
 paddle.fluid.layers.reverse ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.While.__init__ ArgSpec(args=['self', 'cond', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.While.block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.layers.While.complete ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.Switch.__init__ ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.Switch.case ArgSpec(args=['self', 'condition'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.Switch.default ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
@@ -225,17 +225,14 @@ paddle.fluid.layers.DynamicRNN.static_input ArgSpec(args=['self', 'x'], varargs=
 paddle.fluid.layers.DynamicRNN.step_input ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.DynamicRNN.update_memory ArgSpec(args=['self', 'ex_mem', 'new_mem'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.StaticRNN.__init__ ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=(None,))
-paddle.fluid.layers.StaticRNN.complete_op ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.StaticRNN.memory ArgSpec(args=['self', 'init', 'shape', 'batch_ref', 'init_value', 'init_batch_dim_idx', 'ref_batch_dim_idx'], varargs=None, keywords=None, defaults=(None, None, None, 0.0, 0, 1))
 paddle.fluid.layers.StaticRNN.output ArgSpec(args=['self'], varargs='outputs', keywords=None, defaults=None)
-paddle.fluid.layers.StaticRNN.parent_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.StaticRNN.step ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.StaticRNN.step_input ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.StaticRNN.step_output ArgSpec(args=['self', 'o'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.StaticRNN.update_memory ArgSpec(args=['self', 'mem', 'var'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.reorder_lod_tensor_by_rank ArgSpec(args=['x', 'rank_table'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.ParallelDo.__init__ ArgSpec(args=['self', 'places', 'use_nccl', 'name'], varargs=None, keywords=None, defaults=(False, None))
-paddle.fluid.layers.ParallelDo.complete_op ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.ParallelDo.do ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.ParallelDo.get_parameters ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.ParallelDo.parent_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
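
Note: the four entries removed above (While.complete, StaticRNN.complete_op, StaticRNN.parent_block, ParallelDo.complete_op) are not deleted from the code; the second file renames them with a leading underscore, which Python convention treats as private, so they drop out of the tracked public API. A minimal illustrative sketch of the convention (not Paddle code; StaticRNNLike is a hypothetical stand-in):

    class StaticRNNLike(object):
        """Stand-in for a layer class whose completion hook is internal."""

        def _complete_op(self):
            # Leading underscore: an internal hook. It is excluded from
            # `from module import *` and, here, from API.spec tracking.
            print("finalizing the recurrent block")

    rnn = StaticRNNLike()
    rnn._complete_op()  # still callable, but the name says "do not rely on it"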

python/paddle/fluid/layers/control_flow.py

Lines changed: 13 additions & 9 deletions
@@ -21,6 +21,7 @@
 from ..initializer import force_init_on_cpu
 from ops import logical_and, logical_not, logical_or
 import numpy
+import warnings
 
 __all__ = [
     'While',
@@ -280,6 +281,9 @@ class ParallelDo(object):
     """
 
     def __init__(self, places, use_nccl=False, name=None):
+        warnings.warn(
+            "API ParallelDo is deprecated since 0.15.0. Please use ParallelExecutor instead.",
+            Warning)
         self.helper = LayerHelper("parallel_do", name=name)
         self.inputs = []
         self.places = places
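
With this change, constructing a ParallelDo emits a deprecation message through Python's standard warnings module. A minimal sketch of how that surfaces to callers and how it can be escalated to an error during migration (stdlib behavior only; make_parallel_do is a hypothetical stand-in):

    import warnings

    def make_parallel_do():
        # Same pattern as the added code: warn with the generic Warning category.
        warnings.warn(
            "API ParallelDo is deprecated since 0.15.0. "
            "Please use ParallelExecutor instead.", Warning)

    make_parallel_do()  # by default, printed once to stderr

    warnings.simplefilter("error", Warning)  # opt in to hard failures
    try:
        make_parallel_do()
    except Warning as w:
        print("caught:", w)
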
@@ -338,7 +342,7 @@ def get_parameters(self):
 
         return [parent_block.var(name) for name in params]
 
-    def complete_op(self):
+    def _complete_op(self):
         main_program = self.helper.main_program
         current_block = main_program.current_block()
         parent_block = self.parent_block()
@@ -394,7 +398,7 @@ def __exit__(self, exc_type, exc_val, exc_tb):
         if exc_type is not None:
             return False
         self.rnn.status = StaticRNN.AFTER_RNN_BLOCK
-        self.rnn.complete_op()
+        self.rnn._complete_op()
         return super(BlockGuardWithCompletion, self).__exit__(exc_type, exc_val,
                                                               exc_tb)
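
The renamed hook is still called automatically: BlockGuardWithCompletion invokes it from __exit__, so user code that builds the block with a `with` statement never touches the method by name. A simplified sketch of that guard pattern (assumed shape, not the actual Paddle classes):

    class GuardWithCompletion(object):
        """Context manager that finalizes its owner on normal exit,
        the way BlockGuardWithCompletion calls rnn._complete_op()."""

        def __init__(self, owner):
            self.owner = owner

        def __enter__(self):
            return self.owner

        def __exit__(self, exc_type, exc_val, exc_tb):
            if exc_type is not None:
                return False  # propagate the exception, skip completion
            self.owner._complete_op()  # the private hook fires here
            return True

    class Rnn(object):
        def _complete_op(self):
            print("rnn block completed")

    with GuardWithCompletion(Rnn()):
        pass  # build the block body; completion runs on exit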

@@ -470,7 +474,7 @@ def memory(self,
         if shape is None or batch_ref is None:
             raise ValueError(
                 "if init is None, memory at least need shape and batch_ref")
-        parent_block = self.parent_block()
+        parent_block = self._parent_block()
         var_name = unique_name.generate("@".join(
             [self.helper.name, "memory_boot"]))
         boot_var = parent_block.create_var(
@@ -527,7 +531,7 @@ def step_output(self, o):
             outputs={'Out': tmp_o},
             attrs={'dtype': o.dtype})
 
-        out_var = self.parent_block().create_var(
+        out_var = self._parent_block().create_var(
             name=tmp_o.name,
             shape=[self.seq_len] + list(tmp_o.shape),
             dtype=tmp_o.dtype)
@@ -543,7 +547,7 @@ def update_memory(self, mem, var):
             raise TypeError("update memory should take variables")
         self.memories[mem.name].mem = var
 
-    def parent_block(self):
+    def _parent_block(self):
         prog = self.helper.main_program
         parent_idx = prog.current_block().parent_idx
         assert parent_idx >= 0
@@ -560,10 +564,10 @@ def __call__(self, *args, **kwargs):
         else:
             return self.outputs
 
-    def complete_op(self):
+    def _complete_op(self):
         main_program = self.helper.main_program
         rnn_block = main_program.current_block()
-        parent_block = self.parent_block()
+        parent_block = self._parent_block()
 
         local_inputs = set()
 
@@ -643,7 +647,7 @@ def __exit__(self, exc_type, exc_val, exc_tb):
         if exc_type is not None:
             return False
         self.while_op.status = While.AFTER_WHILE_BLOCK
-        self.while_op.complete()
+        self.while_op._complete()
         return super(WhileGuard, self).__exit__(exc_type, exc_val, exc_tb)
 
 
@@ -690,7 +694,7 @@ def __init__(self, cond, name=None):
     def block(self):
         return WhileGuard(self)
 
-    def complete(self):
+    def _complete(self):
         main_program = self.helper.main_program
         while_block = main_program.current_block()
         parent_block = main_program.block(main_program.current_block()
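
Because WhileGuard.__exit__ now calls _complete, the public way to build a loop is unchanged: users enter the body through While.block() and the completion step runs when the `with` block closes. A hedged usage sketch based on the documented fluid pattern of this era (exact argument details may vary by version):

    import paddle.fluid as fluid

    i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
    limit = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
    cond = fluid.layers.less_than(x=i, y=limit)

    while_op = fluid.layers.While(cond=cond)
    with while_op.block():
        i = fluid.layers.increment(x=i, in_place=True)
        fluid.layers.less_than(x=i, y=limit, cond=cond)  # refresh the condition
    # exiting the `with` triggers while_op._complete() via WhileGuard.__exit__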
