from ..layer_helper import LayerHelper, unique_name
from ..initializer import force_init_on_cpu
from ops import logical_and, logical_not, logical_or
+ import numpy

__all__ = [
    'split_lod_tensor',
@@ -1314,6 +1315,39 @@ def __call__(self):


class DynamicRNN(object):
+    """
+    Dynamic RNN.
+
+    This RNN can process a batch of sequence data. The length of each sample
+    sequence can be different; this API processes them in a batch
+    automatically.
+
+    The input lod must be set. Please refer to `lod_tensor`.
+
+    >>> import paddle.fluid as fluid
+    >>> data = fluid.layers.data(
+    >>>     name='sentence', shape=[1], dtype='int64', lod_level=1)
+    >>> embedding = fluid.layers.embedding(input=data, size=[65535, 32],
+    >>>                                    is_sparse=True)
+    >>>
+    >>> drnn = fluid.layers.DynamicRNN()
+    >>> with drnn.block():
+    >>>     word = drnn.step_input(embedding)
+    >>>     prev = drnn.memory(shape=[200])
+    >>>     hidden = fluid.layers.fc(input=[word, prev], size=200, act='relu')
+    >>>     drnn.update_memory(prev, hidden)  # set prev to hidden
+    >>>     drnn.output(hidden)
+    >>>
+    >>> # last is the last time step of the RNN; it is the encoding result.
+    >>> last = fluid.layers.sequence_last_step(drnn())
+
+    The dynamic RNN will unfold the sequence into time steps. Users need to
+    define how to process each time step inside the :code:`with` block.
+
+    The `memory` is used for staging data across time steps. The initial
+    value of a memory can be zero or another variable.
+
+    The dynamic RNN can mark multiple variables as its output. Use `drnn()`
+    to get the output sequence.
+    """
    BEFORE_RNN = 0
    IN_RNN = 1
    AFTER_RNN = 2
@@ -1336,6 +1370,15 @@ def __init__(self, name=None):
        self.mem_link = []

    def step_input(self, x):
+        """
+        Mark a sequence as a dynamic RNN input.
+
+        Args:
+            x(Variable): The input sequence.
+
+        Returns:
+            The current time step in the input sequence.
+
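+        For example, a minimal sketch (the data layer below, with its name
+        and shape, is an illustrative assumption, not part of this API):
+
+        >>> import paddle.fluid as fluid
+        >>> sentence = fluid.layers.data(
+        >>>     name='sentence', shape=[1], dtype='int64', lod_level=1)
+        >>>
+        >>> drnn = fluid.layers.DynamicRNN()
+        >>> with drnn.block():
+        >>>     # word holds one time step of every sequence in the batch
+        >>>     word = drnn.step_input(sentence)
+        >>>     drnn.output(word)
+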
+        """
        self._assert_in_rnn_block_("step_input")
        if not isinstance(x, Variable):
            raise TypeError(
@@ -1379,6 +1422,15 @@ def step_input(self, x):
        return array_read(array=input_array, i=self.step_idx)

    def static_input(self, x):
+        """
+        Mark a variable as an RNN input. The input will not be scattered
+        into time steps.
+
+        Args:
+            x(Variable): The input variable.
+
+        Returns:
+            The input variable that can be accessed within the RNN.
+
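+        For example, a minimal sketch (``context`` here is an assumed extra
+        input that every time step reads without being scattered):
+
+        >>> import paddle.fluid as fluid
+        >>> sentence = fluid.layers.data(
+        >>>     name='sentence', dtype='float32', shape=[32], lod_level=1)
+        >>> context = fluid.layers.data(
+        >>>     name='context', dtype='float32', shape=[32])
+        >>>
+        >>> drnn = fluid.layers.DynamicRNN()
+        >>> with drnn.block():
+        >>>     word = drnn.step_input(sentence)
+        >>>     # the same context tensor is visible at every time step
+        >>>     static = drnn.static_input(context)
+        >>>     out = fluid.layers.fc(input=[word, static], size=32, act='tanh')
+        >>>     drnn.output(out)
+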
+        """
        self._assert_in_rnn_block_("static_input")
        if not isinstance(x, Variable):
            raise TypeError(
@@ -1400,6 +1452,10 @@ def static_input(self, x):

    @contextlib.contextmanager
    def block(self):
+        """
+        The block in which users define the operators of the RNN. See the
+        class docstring for more details.
+        """
        if self.status != DynamicRNN.BEFORE_RNN:
            raise ValueError("rnn.block() can only be invoked once")
        self.step_idx = fill_constant(
@@ -1426,6 +1482,9 @@ def block(self):
                    x=each_array, table=self.lod_rank_table))

    def __call__(self, *args, **kwargs):
+        """
+        Get the output of the RNN. This API can only be invoked after
+        :code:`rnn.block()`.
+        """
        if self.status != DynamicRNN.AFTER_RNN:
            raise ValueError(("Output of the dynamic RNN can only be visited "
                              "outside the rnn block."))
@@ -1440,6 +1499,70 @@ def memory(self,
               value=0.0,
               need_reorder=False,
               dtype='float32'):
+        """
+        Create a memory variable.
+
+        If the :code:`init` is not None, :code:`memory` will be initialized by
+        this variable. :code:`need_reorder` is used to reorder the memory as
+        the input variable. It should be set to True when the initialized
+        memory depends on the input sample.
+
+        For example,
+
+        >>> import paddle.fluid as fluid
+        >>> sentence = fluid.layers.data(
+        >>>     name='sentence', dtype='float32', shape=[32], lod_level=1)
+        >>> boot_memory = fluid.layers.data(
+        >>>     name='boot', dtype='float32', shape=[10])
+        >>>
+        >>> drnn = fluid.layers.DynamicRNN()
+        >>> with drnn.block():
+        >>>     word = drnn.step_input(sentence)
+        >>>     memory = drnn.memory(init=boot_memory, need_reorder=True)
+        >>>     hidden = fluid.layers.fc(
+        >>>         input=[word, memory], size=10, act='tanh')
+        >>>     drnn.update_memory(ex_mem=memory, new_mem=hidden)
+        >>>     drnn.output(hidden)
+        >>> rnn_output = drnn()
+
+        Otherwise, if :code:`shape`, :code:`value`, and :code:`dtype` are set,
+        the :code:`memory` will be initialized by this :code:`value`.
+
+        For example,
+
+        >>> import paddle.fluid as fluid
+        >>> sentence = fluid.layers.data(
+        >>>     name='sentence', dtype='float32', shape=[32], lod_level=1)
+        >>>
+        >>> drnn = fluid.layers.DynamicRNN()
+        >>> with drnn.block():
+        >>>     word = drnn.step_input(sentence)
+        >>>     memory = drnn.memory(shape=[10], dtype='float32', value=0)
+        >>>     hidden = fluid.layers.fc(
+        >>>         input=[word, memory], size=10, act='tanh')
+        >>>     drnn.update_memory(ex_mem=memory, new_mem=hidden)
+        >>>     drnn.output(hidden)
+        >>> rnn_output = drnn()
+
+        Args:
+            init(Variable|None): The initialized variable.
+
+            shape(list|tuple): The memory shape. NOTE: the shape does not
+                include the batch size.
+
+            value(float): The initialized value.
+
+            need_reorder(bool): True if the initialized memory depends on the
+                input sample.
+
+            dtype(str|numpy.dtype): The data type of the initialized memory.
+
+        Returns:
+            The memory variable.
+        """
        self._assert_in_rnn_block_('memory')
        if init is not None:
            if not isinstance(init, Variable):
@@ -1507,6 +1630,16 @@ def memory(self,
            return self.memory(init=init)

    def update_memory(self, ex_mem, new_mem):
+        """
+        Update the memory from ex_mem to new_mem. NOTE that the shape and
+        data type of :code:`ex_mem` and :code:`new_mem` must be the same.
+
+        Args:
+            ex_mem(Variable): the memory variable.
+            new_mem(Variable): the plain variable generated in RNN block.
+
+        Returns:
+            None
+        """
        self._assert_in_rnn_block_('update_memory')
        if not isinstance(ex_mem, Variable):
            raise TypeError("The input arg `ex_mem` of update_memory() must "
@@ -1524,6 +1657,15 @@ def update_memory(self, ex_mem, new_mem):
        self.mem_link.append((new_mem, mem_array))

    def output(self, *outputs):
+        """
+        Mark the RNN output variables.
+
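+        For example, a minimal sketch (assuming ``sentence`` was already
+        created as an input data layer; two outputs are marked only to
+        illustrate that several variables may be given at once):
+
+        >>> with drnn.block():
+        >>>     word = drnn.step_input(sentence)
+        >>>     hidden = fluid.layers.fc(input=word, size=200, act='relu')
+        >>>     # both hidden and word become output sequences of drnn()
+        >>>     drnn.output(hidden, word)
+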
+        Args:
+            outputs: The output variables.
+
+        Returns:
+            None
+        """
        self._assert_in_rnn_block_('output')
        parent_block = self._parent_block_()
        for each in outputs: