Skip to content

Commit ce4eba3

Browse files
committed
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into port_python3_syntax
2 parents 000ba1a + f372f27 commit ce4eba3

File tree

2 files changed

+28
-27
lines changed

2 files changed

+28
-27
lines changed

paddle/fluid/API.spec

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -170,6 +170,7 @@ paddle.fluid.layers.mean_iou ArgSpec(args=['input', 'label', 'num_classes'], var
170170
paddle.fluid.layers.relu ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None)
171171
paddle.fluid.layers.log ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None)
172172
paddle.fluid.layers.crop ArgSpec(args=['x', 'shape', 'offsets', 'name'], varargs=None, keywords=None, defaults=(None, None, None))
173+
paddle.fluid.layers.rank_loss ArgSpec(args=['label', 'left', 'right', 'name'], varargs=None, keywords=None, defaults=(None,))
173174
paddle.fluid.layers.data ArgSpec(args=['name', 'shape', 'append_batch_size', 'dtype', 'lod_level', 'type', 'stop_gradient'], varargs=None, keywords=None, defaults=(True, 'float32', 0, VarType.LOD_TENSOR, True))
174175
paddle.fluid.layers.open_recordio_file ArgSpec(args=['filename', 'shapes', 'lod_levels', 'dtypes', 'pass_num', 'for_parallel'], varargs=None, keywords=None, defaults=(1, True))
175176
paddle.fluid.layers.open_files ArgSpec(args=['filenames', 'shapes', 'lod_levels', 'dtypes', 'thread_num', 'buffer_size', 'pass_num', 'is_test'], varargs=None, keywords=None, defaults=(None, None, 1, None))
@@ -201,7 +202,6 @@ paddle.fluid.layers.zeros ArgSpec(args=['shape', 'dtype', 'force_cpu'], varargs=
201202
paddle.fluid.layers.reverse ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=None)
202203
paddle.fluid.layers.While.__init__ ArgSpec(args=['self', 'cond', 'name'], varargs=None, keywords=None, defaults=(None,))
203204
paddle.fluid.layers.While.block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
204-
paddle.fluid.layers.While.complete ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
205205
paddle.fluid.layers.Switch.__init__ ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=(None,))
206206
paddle.fluid.layers.Switch.case ArgSpec(args=['self', 'condition'], varargs=None, keywords=None, defaults=None)
207207
paddle.fluid.layers.Switch.default ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
@@ -225,17 +225,14 @@ paddle.fluid.layers.DynamicRNN.static_input ArgSpec(args=['self', 'x'], varargs=
225225
paddle.fluid.layers.DynamicRNN.step_input ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None)
226226
paddle.fluid.layers.DynamicRNN.update_memory ArgSpec(args=['self', 'ex_mem', 'new_mem'], varargs=None, keywords=None, defaults=None)
227227
paddle.fluid.layers.StaticRNN.__init__ ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=(None,))
228-
paddle.fluid.layers.StaticRNN.complete_op ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
229228
paddle.fluid.layers.StaticRNN.memory ArgSpec(args=['self', 'init', 'shape', 'batch_ref', 'init_value', 'init_batch_dim_idx', 'ref_batch_dim_idx'], varargs=None, keywords=None, defaults=(None, None, None, 0.0, 0, 1))
230229
paddle.fluid.layers.StaticRNN.output ArgSpec(args=['self'], varargs='outputs', keywords=None, defaults=None)
231-
paddle.fluid.layers.StaticRNN.parent_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
232230
paddle.fluid.layers.StaticRNN.step ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
233231
paddle.fluid.layers.StaticRNN.step_input ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None)
234232
paddle.fluid.layers.StaticRNN.step_output ArgSpec(args=['self', 'o'], varargs=None, keywords=None, defaults=None)
235233
paddle.fluid.layers.StaticRNN.update_memory ArgSpec(args=['self', 'mem', 'var'], varargs=None, keywords=None, defaults=None)
236234
paddle.fluid.layers.reorder_lod_tensor_by_rank ArgSpec(args=['x', 'rank_table'], varargs=None, keywords=None, defaults=None)
237235
paddle.fluid.layers.ParallelDo.__init__ ArgSpec(args=['self', 'places', 'use_nccl', 'name'], varargs=None, keywords=None, defaults=(False, None))
238-
paddle.fluid.layers.ParallelDo.complete_op ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
239236
paddle.fluid.layers.ParallelDo.do ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
240237
paddle.fluid.layers.ParallelDo.get_parameters ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
241238
paddle.fluid.layers.ParallelDo.parent_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)

python/paddle/fluid/layers/control_flow.py

Lines changed: 27 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@
2121
from ..initializer import force_init_on_cpu
2222
from .ops import logical_and, logical_not, logical_or
2323
import numpy
24+
import warnings
2425
from functools import reduce
2526

2627
__all__ = [
@@ -276,11 +277,14 @@ class ParallelDo(object):
276277
avg_cost = fluid.layers.mean(x=cost)
277278
278279
.. warning::
279-
280+
280281
It will be soon deprecated, please use ParallelExecutor instead.
281282
"""
282283

283284
def __init__(self, places, use_nccl=False, name=None):
285+
warnings.warn(
286+
"API ParallelDo is deprecated since 0.15.0. Please use ParallelExecutor instead.",
287+
Warning)
284288
self.helper = LayerHelper("parallel_do", name=name)
285289
self.inputs = []
286290
self.places = places
@@ -339,7 +343,7 @@ def get_parameters(self):
339343

340344
return [parent_block.var(name) for name in params]
341345

342-
def complete_op(self):
346+
def _complete_op(self):
343347
main_program = self.helper.main_program
344348
current_block = main_program.current_block()
345349
parent_block = self.parent_block()
@@ -395,7 +399,7 @@ def __exit__(self, exc_type, exc_val, exc_tb):
395399
if exc_type is not None:
396400
return False
397401
self.rnn.status = StaticRNN.AFTER_RNN_BLOCK
398-
self.rnn.complete_op()
402+
self.rnn._complete_op()
399403
return super(BlockGuardWithCompletion, self).__exit__(exc_type, exc_val,
400404
exc_tb)
401405

@@ -471,7 +475,7 @@ def memory(self,
471475
if shape is None or batch_ref is None:
472476
raise ValueError(
473477
"if init is None, memory at least need shape and batch_ref")
474-
parent_block = self.parent_block()
478+
parent_block = self._parent_block()
475479
var_name = unique_name.generate("@".join(
476480
[self.helper.name, "memory_boot"]))
477481
boot_var = parent_block.create_var(
@@ -528,7 +532,7 @@ def step_output(self, o):
528532
outputs={'Out': tmp_o},
529533
attrs={'dtype': o.dtype})
530534

531-
out_var = self.parent_block().create_var(
535+
out_var = self._parent_block().create_var(
532536
name=tmp_o.name,
533537
shape=[self.seq_len] + list(tmp_o.shape),
534538
dtype=tmp_o.dtype)
@@ -544,7 +548,7 @@ def update_memory(self, mem, var):
544548
raise TypeError("update memory should take variables")
545549
self.memories[mem.name].mem = var
546550

547-
def parent_block(self):
551+
def _parent_block(self):
548552
prog = self.helper.main_program
549553
parent_idx = prog.current_block().parent_idx
550554
assert parent_idx >= 0
@@ -561,10 +565,10 @@ def __call__(self, *args, **kwargs):
561565
else:
562566
return self.outputs
563567

564-
def complete_op(self):
568+
def _complete_op(self):
565569
main_program = self.helper.main_program
566570
rnn_block = main_program.current_block()
567-
parent_block = self.parent_block()
571+
parent_block = self._parent_block()
568572

569573
local_inputs = set()
570574

@@ -644,7 +648,7 @@ def __exit__(self, exc_type, exc_val, exc_tb):
644648
if exc_type is not None:
645649
return False
646650
self.while_op.status = While.AFTER_WHILE_BLOCK
647-
self.while_op.complete()
651+
self.while_op._complete()
648652
return super(WhileGuard, self).__exit__(exc_type, exc_val, exc_tb)
649653

650654

@@ -691,7 +695,7 @@ def __init__(self, cond, name=None):
691695
def block(self):
692696
return WhileGuard(self)
693697

694-
def complete(self):
698+
def _complete(self):
695699
main_program = self.helper.main_program
696700
while_block = main_program.current_block()
697701
parent_block = main_program.block(main_program.current_block()
@@ -816,21 +820,21 @@ def max_sequence_len(rank_table):
816820

817821

818822
def lod_tensor_to_array(x, table):
819-
"""
823+
"""
820824
Convert a LoDTensor to a LoDTensorArray.
821825
822-
This function splits a LoDTensor to a LoDTensorArray according to its LoD
823-
information. LoDTensorArray is an alias of C++ std::vector<LoDTensor> in
824-
PaddlePaddle. The generated LoDTensorArray of this function can be further read
825-
or written by `read_from_array()` and `write_to_array()` operators. However,
826-
this function is generally an internal component of PaddlePaddle `DynamicRNN`.
826+
This function splits a LoDTensor to a LoDTensorArray according to its LoD
827+
information. LoDTensorArray is an alias of C++ std::vector<LoDTensor> in
828+
PaddlePaddle. The generated LoDTensorArray of this function can be further read
829+
or written by `read_from_array()` and `write_to_array()` operators. However,
830+
this function is generally an internal component of PaddlePaddle `DynamicRNN`.
827831
Users should not use it directly.
828832
829833
Args:
830834
x (Variable|list): The LoDTensor to be converted to a LoDTensorArray.
831835
table (ParamAttr|list): The variable that stores the level of lod
832836
which is ordered by sequence length in
833-
descending order. It is generally generated
837+
descending order. It is generally generated
834838
by `layers.lod_rank_table()` API.
835839
836840
Returns:
@@ -1064,9 +1068,9 @@ def array_read(array, i):
10641068
Given:
10651069
10661070
array = [0.6, 0.1, 0.3, 0.1]
1067-
1071+
10681072
And:
1069-
1073+
10701074
i = 2
10711075
10721076
Then:
@@ -1173,9 +1177,9 @@ def array_length(array):
11731177

11741178
class ConditionalBlockGuard(BlockGuard):
11751179
"""
1176-
ConditionalBlockGuard is derived from BlockGuard. It is dedicated for
1177-
holding a ConditionalBlock, and helping users entering and exiting the
1178-
ConditionalBlock via Python's 'with' keyword. However, ConditionalBlockGuard
1180+
ConditionalBlockGuard is derived from BlockGuard. It is dedicated for
1181+
holding a ConditionalBlock, and helping users entering and exiting the
1182+
ConditionalBlock via Python's 'with' keyword. However, ConditionalBlockGuard
11791183
is generally an internal component of IfElse, users should not use it directly.
11801184
"""
11811185

@@ -1929,7 +1933,7 @@ def is_empty(x, cond=None, **ignored):
19291933
19301934
Args:
19311935
x (Variable): The Variable to be tested.
1932-
cond (Variable|None): Output parameter. Returns the test result
1936+
cond (Variable|None): Output parameter. Returns the test result
19331937
of given 'x'. Default: None
19341938
19351939
Returns:

0 commit comments

Comments
 (0)