Skip to content

Commit 24bea40

Browse files
authored
Hide some LoDTensor related ops' Python wrappers. (#12230)
* Hide some LoDTensor related ops' Python wrappers.
1 parent 6169d72 commit 24bea40

11 files changed

+55
-56
lines changed

paddle/fluid/API.spec

Lines changed: 0 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -200,31 +200,23 @@ paddle.fluid.layers.argsort ArgSpec(args=['input', 'axis', 'name'], varargs=None
200200
paddle.fluid.layers.ones ArgSpec(args=['shape', 'dtype', 'force_cpu'], varargs=None, keywords=None, defaults=(False,))
201201
paddle.fluid.layers.zeros ArgSpec(args=['shape', 'dtype', 'force_cpu'], varargs=None, keywords=None, defaults=(False,))
202202
paddle.fluid.layers.reverse ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=None)
203-
paddle.fluid.layers.split_lod_tensor ArgSpec(args=['input', 'mask', 'level'], varargs=None, keywords=None, defaults=(0,))
204-
paddle.fluid.layers.merge_lod_tensor ArgSpec(args=['in_true', 'in_false', 'x', 'mask', 'level'], varargs=None, keywords=None, defaults=(0,))
205203
paddle.fluid.layers.While.__init__ ArgSpec(args=['self', 'cond', 'name'], varargs=None, keywords=None, defaults=(None,))
206204
paddle.fluid.layers.While.block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
207205
paddle.fluid.layers.While.complete ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
208206
paddle.fluid.layers.Switch.__init__ ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=(None,))
209207
paddle.fluid.layers.Switch.case ArgSpec(args=['self', 'condition'], varargs=None, keywords=None, defaults=None)
210208
paddle.fluid.layers.Switch.default ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
211-
paddle.fluid.layers.lod_rank_table ArgSpec(args=['x', 'level'], varargs=None, keywords=None, defaults=(0,))
212-
paddle.fluid.layers.max_sequence_len ArgSpec(args=['rank_table'], varargs=None, keywords=None, defaults=None)
213-
paddle.fluid.layers.lod_tensor_to_array ArgSpec(args=['x', 'table'], varargs=None, keywords=None, defaults=None)
214-
paddle.fluid.layers.array_to_lod_tensor ArgSpec(args=['x', 'table'], varargs=None, keywords=None, defaults=None)
215209
paddle.fluid.layers.increment ArgSpec(args=['x', 'value', 'in_place'], varargs=None, keywords=None, defaults=(1.0, True))
216210
paddle.fluid.layers.array_write ArgSpec(args=['x', 'i', 'array'], varargs=None, keywords=None, defaults=(None,))
217211
paddle.fluid.layers.create_array ArgSpec(args=['dtype'], varargs=None, keywords=None, defaults=None)
218212
paddle.fluid.layers.less_than ArgSpec(args=['x', 'y', 'force_cpu', 'cond'], varargs=None, keywords='ignored', defaults=(None, None))
219213
paddle.fluid.layers.equal ArgSpec(args=['x', 'y', 'cond'], varargs=None, keywords='ignored', defaults=(None,))
220214
paddle.fluid.layers.array_read ArgSpec(args=['array', 'i'], varargs=None, keywords=None, defaults=None)
221-
paddle.fluid.layers.shrink_memory ArgSpec(args=['x', 'i', 'table'], varargs=None, keywords=None, defaults=None)
222215
paddle.fluid.layers.array_length ArgSpec(args=['array'], varargs=None, keywords=None, defaults=None)
223216
paddle.fluid.layers.IfElse.__init__ ArgSpec(args=['self', 'cond', 'name'], varargs=None, keywords=None, defaults=(None,))
224217
paddle.fluid.layers.IfElse.false_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
225218
paddle.fluid.layers.IfElse.input ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None)
226219
paddle.fluid.layers.IfElse.output ArgSpec(args=['self'], varargs='outs', keywords=None, defaults=None)
227-
paddle.fluid.layers.IfElse.parent_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
228220
paddle.fluid.layers.IfElse.true_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
229221
paddle.fluid.layers.DynamicRNN.__init__ ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=(None,))
230222
paddle.fluid.layers.DynamicRNN.block ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
@@ -233,9 +225,6 @@ paddle.fluid.layers.DynamicRNN.output ArgSpec(args=['self'], varargs='outputs',
233225
paddle.fluid.layers.DynamicRNN.static_input ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None)
234226
paddle.fluid.layers.DynamicRNN.step_input ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None)
235227
paddle.fluid.layers.DynamicRNN.update_memory ArgSpec(args=['self', 'ex_mem', 'new_mem'], varargs=None, keywords=None, defaults=None)
236-
paddle.fluid.layers.ConditionalBlock.__init__ ArgSpec(args=['self', 'inputs', 'is_scalar_condition', 'name'], varargs=None, keywords=None, defaults=(False, None))
237-
paddle.fluid.layers.ConditionalBlock.block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
238-
paddle.fluid.layers.ConditionalBlock.complete ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
239228
paddle.fluid.layers.StaticRNN.__init__ ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=(None,))
240229
paddle.fluid.layers.StaticRNN.complete_op ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
241230
paddle.fluid.layers.StaticRNN.memory ArgSpec(args=['self', 'init', 'shape', 'batch_ref', 'init_value', 'init_batch_dim_idx', 'ref_batch_dim_idx'], varargs=None, keywords=None, defaults=(None, None, None, 0.0, 0, 1))

python/paddle/fluid/layers/control_flow.py

Lines changed: 3 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -23,25 +23,17 @@
2323
import numpy
2424

2525
__all__ = [
26-
'split_lod_tensor',
27-
'merge_lod_tensor',
2826
'While',
2927
'Switch',
30-
'lod_rank_table',
31-
'max_sequence_len',
32-
'lod_tensor_to_array',
33-
'array_to_lod_tensor',
3428
'increment',
3529
'array_write',
3630
'create_array',
3731
'less_than',
3832
'equal',
3933
'array_read',
40-
'shrink_memory',
4134
'array_length',
4235
'IfElse',
4336
'DynamicRNN',
44-
'ConditionalBlock',
4537
'StaticRNN',
4638
'reorder_lod_tensor_by_rank',
4739
'ParallelDo',
@@ -1457,7 +1449,7 @@ def input(self, x):
14571449
if self.status == IfElse.OUT_IF_ELSE_BLOCKS:
14581450
raise ValueError("input must in true/false blocks")
14591451
if id(x) not in self.input_table:
1460-
parent_block = self.parent_block()
1452+
parent_block = self._parent_block()
14611453
out_true = parent_block.create_var(
14621454
name=unique_name.generate('ifelse_input' + self.helper.name),
14631455
dtype=x.dtype)
@@ -1483,7 +1475,7 @@ def input(self, x):
14831475
else:
14841476
return out_false
14851477

1486-
def parent_block(self):
1478+
def _parent_block(self):
14871479
current_block = self.helper.main_program.current_block()
14881480
return self.helper.main_program.block(current_block.parent_idx)
14891481

@@ -1499,7 +1491,7 @@ def output(self, *outs):
14991491

15001492
out_table = self.output_table[1 if self.status ==
15011493
self.IN_IF_ELSE_TRUE_BLOCKS else 0]
1502-
parent_block = self.parent_block()
1494+
parent_block = self._parent_block()
15031495
for each_out in outs:
15041496
if not isinstance(each_out, Variable):
15051497
raise TypeError("Each output should be a variable")

python/paddle/fluid/tests/test_if_else_op.py

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,10 @@
1919
from paddle.fluid.optimizer import MomentumOptimizer
2020
import paddle.fluid.core as core
2121
import paddle.fluid as fluid
22+
from paddle.fluid.layers.control_flow import split_lod_tensor
23+
from paddle.fluid.layers.control_flow import merge_lod_tensor
24+
from paddle.fluid.layers.control_flow import ConditionalBlock
25+
2226
import unittest
2327
import numpy as np
2428

@@ -34,26 +38,25 @@ def test_raw_api(self):
3438

3539
limit = layers.fill_constant(shape=[1], dtype='int64', value=5)
3640
cond = layers.less_than(x=label, y=limit)
37-
true_image, false_image = layers.split_lod_tensor(
38-
input=image, mask=cond)
41+
true_image, false_image = split_lod_tensor(input=image, mask=cond)
3942

4043
true_out = layers.create_tensor(dtype='float32')
41-
true_cond = layers.ConditionalBlock([cond])
44+
true_cond = ConditionalBlock([cond])
4245

4346
with true_cond.block():
4447
hidden = layers.fc(input=true_image, size=100, act='tanh')
4548
prob = layers.fc(input=hidden, size=10, act='softmax')
4649
layers.assign(input=prob, output=true_out)
4750

4851
false_out = layers.create_tensor(dtype='float32')
49-
false_cond = layers.ConditionalBlock([cond])
52+
false_cond = ConditionalBlock([cond])
5053

5154
with false_cond.block():
5255
hidden = layers.fc(input=false_image, size=200, act='tanh')
5356
prob = layers.fc(input=hidden, size=10, act='softmax')
5457
layers.assign(input=prob, output=false_out)
5558

56-
prob = layers.merge_lod_tensor(
59+
prob = merge_lod_tensor(
5760
in_true=true_out, in_false=false_out, mask=cond, x=image)
5861
loss = layers.cross_entropy(input=prob, label=label)
5962
avg_loss = layers.mean(loss)

python/paddle/fluid/tests/unittests/test_conditional_block.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,14 +18,15 @@
1818
from paddle.fluid.framework import default_startup_program, default_main_program
1919
from paddle.fluid.executor import Executor
2020
from paddle.fluid.backward import append_backward
21+
from paddle.fluid.layers.control_flow import ConditionalBlock
2122
import numpy
2223

2324

24-
class ConditionalBlock(unittest.TestCase):
25+
class ConditionalBlockTest(unittest.TestCase):
2526
def test_forward(self):
2627
data = layers.data(name='X', shape=[1], dtype='float32')
2728
data.stop_gradient = False
28-
cond = layers.ConditionalBlock(inputs=[data])
29+
cond = ConditionalBlock(inputs=[data])
2930
out = layers.create_tensor(dtype='float32')
3031
with cond.block():
3132
hidden = layers.fc(input=data, size=10)

python/paddle/fluid/tests/unittests/test_const_value.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@
1616
import paddle.fluid.framework as framework
1717

1818

19-
class ConditionalBlock(unittest.TestCase):
19+
class ConstantTest(unittest.TestCase):
2020
def test_const_value(self):
2121
self.assertEqual(framework.GRAD_VAR_SUFFIX, "@GRAD")
2222
self.assertEqual(framework.TEMP_VAR_NAME, "@TEMP@")

python/paddle/fluid/tests/unittests/test_dyn_rnn.py

Lines changed: 11 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,12 @@
1717
import unittest
1818
import numpy
1919

20+
from paddle.fluid.layers.control_flow import lod_rank_table
21+
from paddle.fluid.layers.control_flow import max_sequence_len
22+
from paddle.fluid.layers.control_flow import lod_tensor_to_array
23+
from paddle.fluid.layers.control_flow import array_to_lod_tensor
24+
from paddle.fluid.layers.control_flow import shrink_memory
25+
2026

2127
class TestDynRNN(unittest.TestCase):
2228
def setUp(self):
@@ -38,12 +44,11 @@ def test_plain_while_op(self):
3844

3945
label = fluid.layers.data(name='label', shape=[1], dtype='float32')
4046

41-
rank_table = fluid.layers.lod_rank_table(x=sent_emb)
47+
rank_table = lod_rank_table(x=sent_emb)
4248

43-
sent_emb_array = fluid.layers.lod_tensor_to_array(
44-
x=sent_emb, table=rank_table)
49+
sent_emb_array = lod_tensor_to_array(x=sent_emb, table=rank_table)
4550

46-
seq_len = fluid.layers.max_sequence_len(rank_table=rank_table)
51+
seq_len = max_sequence_len(rank_table=rank_table)
4752
i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
4853
i.stop_gradient = False
4954

@@ -66,7 +71,7 @@ def test_plain_while_op(self):
6671
mem = fluid.layers.array_read(array=mem_array, i=i)
6772
ipt = fluid.layers.array_read(array=sent_emb_array, i=i)
6873

69-
mem = fluid.layers.shrink_memory(x=mem, i=i, table=rank_table)
74+
mem = shrink_memory(x=mem, i=i, table=rank_table)
7075

7176
hidden = fluid.layers.fc(input=[mem, ipt], size=100, act='tanh')
7277

@@ -75,8 +80,7 @@ def test_plain_while_op(self):
7580
fluid.layers.array_write(x=hidden, i=i, array=mem_array)
7681
fluid.layers.less_than(x=i, y=seq_len, cond=cond)
7782

78-
all_timesteps = fluid.layers.array_to_lod_tensor(
79-
x=out, table=rank_table)
83+
all_timesteps = array_to_lod_tensor(x=out, table=rank_table)
8084
last = fluid.layers.sequence_last_step(input=all_timesteps)
8185
logits = fluid.layers.fc(input=last, size=1, act=None)
8286
loss = fluid.layers.sigmoid_cross_entropy_with_logits(

python/paddle/fluid/tests/unittests/test_lod_rank_table.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,8 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414

15-
from paddle.fluid.layers import lod_rank_table, data
15+
from paddle.fluid.layers import data
16+
from paddle.fluid.layers.control_flow import lod_rank_table
1617
from paddle.fluid.executor import Executor
1718
import paddle.fluid.core as core
1819
import numpy

python/paddle/fluid/tests/unittests/test_lod_tensor_array_ops.py

Lines changed: 12 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,11 @@
2020
from paddle.fluid.executor import Executor
2121
from paddle.fluid.backward import append_backward
2222

23+
from paddle.fluid.layers.control_flow import lod_rank_table
24+
from paddle.fluid.layers.control_flow import max_sequence_len
25+
from paddle.fluid.layers.control_flow import lod_tensor_to_array
26+
from paddle.fluid.layers.control_flow import array_to_lod_tensor
27+
2328

2429
class TestCPULoDTensorArrayOps(unittest.TestCase):
2530
def place(self):
@@ -137,13 +142,13 @@ def main(self, tensor, expect_array, expect_lod, expect_max_len, level=0):
137142
with program_guard(program):
138143
x = layers.data(name='x', shape=[10])
139144
x.persistable = True
140-
table = layers.lod_rank_table(x, level=level)
141-
max_len = layers.max_sequence_len(table)
145+
table = lod_rank_table(x, level=level)
146+
max_len = max_sequence_len(table)
142147
max_len.persistable = True
143-
array = layers.lod_tensor_to_array(x, table)
148+
array = lod_tensor_to_array(x, table)
144149
array.persistable = True
145150

146-
result = layers.array_to_lod_tensor(array, table)
151+
result = array_to_lod_tensor(array, table)
147152
result.persistable = True
148153
exe = Executor(place)
149154
scope = core.Scope()
@@ -181,9 +186,9 @@ def test_grad(self):
181186
with program_guard(program):
182187
x = layers.data(
183188
name='x', shape=[1], dtype='float32', stop_gradient=False)
184-
table = layers.lod_rank_table(x, level=0)
185-
array = layers.lod_tensor_to_array(x, table)
186-
result = layers.array_to_lod_tensor(array, table)
189+
table = lod_rank_table(x, level=0)
190+
array = lod_tensor_to_array(x, table)
191+
result = array_to_lod_tensor(array, table)
187192

188193
mean = layers.mean(result)
189194

python/paddle/fluid/tests/unittests/test_reorder_lod_tensor.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@
1515
import unittest
1616
import paddle.fluid as fluid
1717
import paddle.fluid.core as core
18+
from paddle.fluid.layers.control_flow import lod_rank_table
1819
import numpy
1920

2021

@@ -34,7 +35,7 @@ def set_program(cls):
3435
dat.stop_gradient = False
3536
rank_dat = fluid.layers.data(
3637
name=cls.data_desc[1][0], shape=cls.data_desc[1][1])
37-
table = fluid.layers.lod_rank_table(rank_dat)
38+
table = lod_rank_table(rank_dat)
3839
new_dat = fluid.layers.reorder_lod_tensor_by_rank(
3940
x=dat, rank_table=table)
4041
loss = fluid.layers.reduce_sum(new_dat)

python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,9 @@
2121
from paddle.fluid.framework import Program
2222
import numpy as np
2323

24+
from paddle.fluid.layers.control_flow import shrink_memory
25+
from paddle.fluid.layers.control_flow import lod_rank_table
26+
2427

2528
class TestShrinkRNNMemoryBase(unittest.TestCase):
2629
def setUp(self):
@@ -30,15 +33,15 @@ def setUp(self):
3033
x.stop_gradient = False
3134
rank_table_tensor = layers.data(
3235
'rank_table_tensor', shape=[1], dtype='float32', lod_level=1)
33-
table = layers.lod_rank_table(x=rank_table_tensor)
36+
table = lod_rank_table(x=rank_table_tensor)
3437
i = layers.zeros(dtype='int64', shape=[1])
35-
self.mem1 = layers.shrink_memory(x=x, i=i, table=table)
38+
self.mem1 = shrink_memory(x=x, i=i, table=table)
3639
i = layers.increment(x=i)
3740
i.stop_gradient = True
38-
self.mem2 = layers.shrink_memory(x=self.mem1, i=i, table=table)
41+
self.mem2 = shrink_memory(x=self.mem1, i=i, table=table)
3942
i = layers.increment(x=i)
4043
i.stop_gradient = True
41-
self.mem3 = layers.shrink_memory(x=self.mem2, i=i, table=table)
44+
self.mem3 = shrink_memory(x=self.mem2, i=i, table=table)
4245
mem3_mean = layers.mean(self.mem3)
4346
append_backward(loss=mem3_mean)
4447
self.x_grad = self.main_program.global_block().var('x@GRAD')

0 commit comments

Comments
 (0)