
Commit 2d009c9

committed: update docs
1 parent b7e329d commit 2d009c9

File tree

2 files changed: +30 -23 lines changed

docs/modules/layers.rst

Lines changed: 1 addition & 1 deletion
@@ -619,7 +619,7 @@ Bidirectional layer
 
 
 
-Convolutional Recurrent layer
+Recurrent Convolutional layer
 -------------------------------
 
 Conv RNN Cell

tensorlayer/layers.py

Lines changed: 29 additions & 22 deletions
@@ -1842,7 +1842,7 @@ def tf_batch_map_offsets(inputs, offsets, grid_offset):
 
 class DeformableConv2dLayer(Layer):
     """The :class:`DeformableConv2dLayer` class is a
-    `Deformable Convolutional Layer <https://arxiv.org/abs/1703.06211>`
+    `Deformable Convolutional Layer <https://arxiv.org/abs/1703.06211>`_ .
 
     Parameters
     -----------
@@ -4664,7 +4664,7 @@ def __init__(
 
 # ConvLSTM layer
 class ConvRNNCell(object):
-    """Abstract object representing an Convolutional RNN cell.
+    """Abstract object representing an Convolutional RNN Cell.
     """
 
     def __call__(self, inputs, state, scope=None):
@@ -4699,21 +4699,23 @@ def zero_state(self, batch_size, dtype):
         return zeros
 
 class BasicConvLSTMCell(ConvRNNCell):
-    """Basic Conv LSTM recurrent network cell. The
+    """Basic Conv LSTM recurrent network cell.
+
+    Parameters
+    -----------
+    shape : int tuple thats the height and width of the cell
+    filter_size : int tuple thats the height and width of the filter
+    num_features : int thats the depth of the cell
+    forget_bias : float, The bias added to forget gates (see above).
+    input_size : Deprecated and unused.
+    state_is_tuple : If True, accepted and returned states are 2-tuples of
+        the `c_state` and `m_state`. If False, they are concatenated
+        along the column axis. The latter behavior will soon be deprecated.
+    activation : Activation function of the inner states.
     """
     def __init__(self, shape, filter_size, num_features, forget_bias=1.0, input_size=None,
                  state_is_tuple=False, activation=tf.nn.tanh):
         """Initialize the basic Conv LSTM cell.
-        Args:
-          shape: int tuple thats the height and width of the cell
-          filter_size: int tuple thats the height and width of the filter
-          num_features: int thats the depth of the cell
-          forget_bias: float, The bias added to forget gates (see above).
-          input_size: Deprecated and unused.
-          state_is_tuple: If True, accepted and returned states are 2-tuples of
-            the `c_state` and `m_state`. If False, they are concatenated
-            along the column axis. The latter behavior will soon be deprecated.
-          activation: Activation function of the inner states.
         """
         # if not state_is_tuple:
         #   logging.warn("%s: Using a concatenated state is slower and will soon be "
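
As a quick illustration of the constructor documented in this hunk, here is a minimal usage sketch. It assumes TensorFlow 1.x and relies only on the signatures visible in the diff (`BasicConvLSTMCell(shape, filter_size, num_features, ...)`, `zero_state(batch_size, dtype)`, `__call__(inputs, state, scope=None)`); the placeholder shapes and the (output, new_state) return convention are assumptions.

import tensorflow as tf
from tensorlayer.layers import BasicConvLSTMCell

# One ConvLSTM cell over 32x32 feature maps, 3x3 filters, 64 output channels.
cell = BasicConvLSTMCell(shape=(32, 32), filter_size=(3, 3), num_features=64)

# Hypothetical input: one time step of a video batch, [batch, height, width, channels].
x_t = tf.placeholder(tf.float32, [16, 32, 32, 3])

# zero_state(batch_size, dtype) builds the initial state; with state_is_tuple=False
# (the default above) it is a single tensor with the c and m states concatenated.
state = cell.zero_state(batch_size=16, dtype=tf.float32)

# One recurrent step; assumed to return (output, new_state) per the usual RNNCell contract.
output, state = cell(x_t, state)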
@@ -4764,16 +4766,22 @@ def __call__(self, inputs, state, scope=None):
 
 def _conv_linear(args, filter_size, num_features, bias, bias_start=0.0, scope=None):
     """convolution:
-    Args:
+
+    Parameters
+    ----------
     args: a 4D Tensor or a list of 4D, batch x n, Tensors.
     filter_size: int tuple of filter height and width.
     num_features: int, number of features.
     bias_start: starting value to initialize the bias; 0 by default.
     scope: VariableScope for the created subgraph; defaults to "Linear".
-    Returns:
-        A 4D Tensor with shape [batch h w num_features]
-    Raises:
-        ValueError: if some of the arguments has unspecified or wrong shape.
+
+    Returns
+    --------
+    - A 4D Tensor with shape [batch h w num_features]
+
+    Raises
+    -------
+    - ValueError : if some of the arguments has unspecified or wrong shape.
     """
 
     # Calculate the total size of arguments on dimension 1.
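
A hedged sketch of how this helper might be called, based only on the signature and docstring above; the tensor names and shapes are hypothetical.

import tensorflow as tf
from tensorlayer.layers import _conv_linear

# Hypothetical tensors: an input frame and the previous hidden state, both
# 4D [batch, height, width, channels] as the docstring requires.
x_t = tf.placeholder(tf.float32, [16, 32, 32, 3])
h = tf.placeholder(tf.float32, [16, 32, 32, 64])

# Convolve both jointly and project to num_features output channels.
y = _conv_linear([x_t, h], filter_size=(3, 3), num_features=64, bias=True)
# y has shape [16, 32, 32, 64], i.e. [batch h w num_features] as documented.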
@@ -4808,8 +4816,9 @@ def _conv_linear(args, filter_size, num_features, bias, bias_start=0.0, scope=No
 
 class ConvLSTMLayer(Layer):
     """
-    The :class:`ConvLSTMLayer` class is a Convolutional LSTM layer.
-    `Convolutional LSTM Layer <https://arxiv.org/abs/1506.04214>`
+    The :class:`ConvLSTMLayer` class is a Convolutional LSTM layer,
+    see `Convolutional LSTM Layer <https://arxiv.org/abs/1506.04214>`_ .
+
     Parameters
     ----------
     layer : a :class:`Layer` instance
@@ -4857,9 +4866,7 @@ class ConvLSTMLayer(Layer):
 
     batch_size : int or tensor
         Is int, if able to compute the batch_size, otherwise, tensor for ``?``.
-
     """
-
     def __init__(
         self,
         layer=None,
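
As a side note on the `batch_size` attribute kept in this last hunk ("int, if able to compute the batch_size, otherwise, tensor for ``?``"), a small TensorFlow 1.x sketch of the two cases; the placeholder shapes are hypothetical.

import tensorflow as tf

# Fixed batch dimension: batch_size can be a plain int read from the static shape.
x_fixed = tf.placeholder(tf.float32, [32, 10, 100, 100, 3])
fixed_batch = x_fixed.get_shape()[0].value   # -> 32 (int)

# Unknown batch dimension ("?"): only a tensor computed at run time can stand in for it.
x_dyn = tf.placeholder(tf.float32, [None, 10, 100, 100, 3])
dyn_batch = tf.shape(x_dyn)[0]               # -> scalar int32 tensor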
