
Commit 4d164ea

fix typos and memory optimisation
1 parent 8818ee0 commit 4d164ea

File tree

1 file changed (+19, -27)

tensorlayer/layers.py

Lines changed: 19 additions & 27 deletions
```diff
@@ -1805,7 +1805,7 @@ def tf_batch_map_offsets(inputs, offsets, grid_offset):
     ---------
     inputs : tf.Tensor. shape = (b, h, w, c)
     offsets: tf.Tensor. shape = (b, h, w, 2*n)
-    grid_offset: Offset grids
+    grid_offset: Offset grids shape = (h, w, n, 2)
 
     Returns
     -------
```
```diff
@@ -1814,25 +1814,26 @@ def tf_batch_map_offsets(inputs, offsets, grid_offset):
 
     input_shape = inputs.get_shape()
     batch_size = tf.shape(inputs)[0]
-    kernel_n = int(int(offsets.get_shape()[3]) / 2)
+    kernel_n = int(int(offsets.get_shape()[3])/2)
     input_h = input_shape[1]
     input_w = input_shape[2]
     channel = input_shape[3]
-    batch_channel = batch_size * input_shape[3]
 
     # inputs (b, h, w, c) --> (b*c, h, w)
     inputs = _to_bc_h_w(inputs, input_shape)
 
     # offsets (b, h, w, 2*n) --> (b, h, w, n, 2)
     offsets = tf.reshape(offsets, (batch_size, input_h, input_w, kernel_n, 2))
     # offsets (b, h, w, n, 2) --> (b*c, h, w, n, 2)
-    offsets = tf.tile(offsets, [channel, 1, 1, 1, 1])
+    # offsets = tf.tile(offsets, [channel, 1, 1, 1, 1])
 
     coords = tf.expand_dims(grid_offset, 0)  # grid_offset --> (1, h, w, n, 2)
-    coords = tf.tile(coords, [batch_channel, 1, 1, 1, 1]) + offsets  # grid_offset --> (b*c, h, w, n, 2)
+    coords = tf.tile(coords, [batch_size, 1, 1, 1, 1]) + offsets  # grid_offset --> (b, h, w, n, 2)
+
     # clip out of bound
     coords = tf.stack([tf.clip_by_value(coords[:, :, :, :, 0], 0.0, tf.cast(input_h - 1, 'float32')),
                        tf.clip_by_value(coords[:, :, :, :, 1], 0.0, tf.cast(input_w - 1, 'float32'))], axis=-1)
+    coords = tf.tile(coords, [channel, 1, 1, 1, 1])
 
     mapped_vals = tf_batch_map_coordinates(inputs, coords)
     # (b*c, h, w, n) --> (b, h, w, n, c)
```
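
This hunk is the memory optimisation named in the commit message: the add and clip on `coords` now run at batch size `b` instead of `b*c`, and the expansion to `b*c` happens once, on the already-clipped coordinates, right before sampling. A rough sketch of the shape arithmetic (illustrative numbers, not from the commit):

```python
# Illustrative sizes only; b, h, w, c, n are made-up example values.
b, h, w, c, n = 8, 32, 32, 64, 9

# Before: offsets and coords were both tiled to b*c up front, so the
# add and the clip each materialised tensors of this many elements:
elems_before = (b * c) * h * w * n * 2

# After: the add and clip stay at batch size b; only the final tile
# for tf_batch_map_coordinates expands to b*c:
elems_after = b * h * w * n * 2

print(elems_before // elems_after)  # == c, i.e. c-fold smaller intermediates
```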
```diff
@@ -4835,23 +4836,23 @@ class ConvLSTMLayer(Layer):
         The `Layer` class feeding into this layer.
     cell_shape : tuple, the shape of each cell width*height
     filter_size : tuple, the size of filter width*height
-    cell_fn : a TensorFlow's core Convolutional RNN cell as follow.
+    cell_fn : a Convolutional RNN cell as follow.
     feature_map : a int
         The number of feature map in the layer.
     initializer : initializer
         The initializer for initializing the parameters.
     n_steps : a int
         The sequence length.
-    initial_state : None or RNN State
+    initial_state : None or ConvLSTM State
         If None, initial_state is zero_state.
     return_last : boolen
         - If True, return the last output, "Sequence input and single output"
         - If False, return all outputs, "Synced sequence input and output"
-        - In other word, if you want to apply one or more RNN(s) on this layer, set to False.
+        - In other word, if you want to apply one or more ConvLSTM(s) on this layer, set to False.
     return_seq_2d : boolen
         - When return_last = False
-        - If True, return 2D Tensor [n_example, n_hidden], for stacking DenseLayer after it.
-        - If False, return 3D Tensor [n_example/n_steps, n_steps, n_hidden], for stacking multiple RNN after it.
+        - If True, return 4D Tensor [n_example, h, w, c], for stacking DenseLayer after it.
+        - If False, return 5D Tensor [n_example/n_steps, h, w, c], for stacking multiple ConvLSTM after it.
     name : a string or None
         An optional name to attach to this layer.
 
```
```diff
@@ -4860,17 +4861,17 @@ class ConvLSTMLayer(Layer):
     outputs : a tensor
         The output of this RNN.
         return_last = False, outputs = all cell_output, which is the hidden state.
-            cell_output.get_shape() = (?, n_hidden)
+            cell_output.get_shape() = (?, h, w, c])
 
     final_state : a tensor or StateTuple
         When state_is_tuple = False,
-        it is the final hidden and cell states, states.get_shape() = [?, 2 * n_hidden].\n
-        When state_is_tuple = True, it stores two elements: (c, h), in that order.
+        it is the final hidden and cell states,
+        When state_is_tuple = True,
         You can get the final state after each iteration during training, then
         feed it to the initial state of next iteration.
 
     initial_state : a tensor or StateTuple
-        It is the initial state of this RNN layer, you can use it to initialize
+        It is the initial state of this ConvLSTM layer, you can use it to initialize
         your state at the begining of each epoch or iteration according to your
         training procedure.
 
```
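
Taken together, the docstring hunks above retarget the text from the plain RNN layer to ConvLSTM and its 4D/5D shapes. A hedged usage sketch (TensorLayer 1.x / TF 1.x assumed; argument names follow this docstring, and `BasicConvLSTMCell` is assumed to be the convolutional cell defined elsewhere in this file):

```python
import tensorflow as tf
import tensorlayer as tl

# Rank-5 input [batch_size, n_steps, h, w, c], as the layer requires
x = tf.placeholder(tf.float32, shape=[None, 5, 32, 32, 3])
net = tl.layers.InputLayer(x, name='input')
net = tl.layers.ConvLSTMLayer(net,
                              cell_shape=(32, 32),                  # h, w of each cell
                              filter_size=(3, 3),
                              feature_map=8,
                              cell_fn=tl.layers.BasicConvLSTMCell,  # assumed cell class
                              n_steps=5,
                              return_last=False,
                              return_seq_2d=False,  # keep 5D output for stacking
                              name='convlstm1')
```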
```diff
@@ -4902,7 +4903,7 @@ def __init__(
         # self.inputs.get_shape().with_rank(2)
         # self.inputs.get_shape().with_rank(3)
 
-        # Input dimension should be rank 5 [batch_size, n_steps(max), n_features]
+        # Input dimension should be rank 5 [batch_size, n_steps(max), h, w, c]
         try:
             self.inputs.get_shape().with_rank(5)
         except:
```
```diff
@@ -4920,16 +4921,7 @@ def __init__(
             print(" non specified batch_size, uses a tensor instead.")
         self.batch_size = batch_size
 
-        # Simplified version of tensorflow.models.rnn.rnn.py's rnn().
-        # This builds an unrolled LSTM for tutorial purposes only.
-        # In general, use the rnn() or state_saving_rnn() from rnn.py.
-        #
-        # The alternative version of the code below is:
-        #
-        # from tensorflow.models.rnn import rnn
-        # inputs = [tf.squeeze(input_, [1])
-        #           for input_ in tf.split(1, num_steps, inputs)]
-        # outputs, state = rnn.rnn(cell, inputs, initial_state=self._initial_state)
+
         outputs = []
         self.cell = cell = cell_fn(shape=cell_shape, filter_size=filter_size, num_features=feature_map)
         if initial_state is None:
```
```diff
@@ -4954,11 +4946,11 @@ def __init__(
         else:
             if return_seq_2d:
                 # PTB tutorial: stack dense layer after that, or compute the cost from the output
-                # 2D Tensor [n_example, n_hidden]
+                # 4D Tensor [n_example, h, w, c]
                 self.outputs = tf.reshape(tf.concat(outputs, 1), [-1, cell_shape[0] * cell_shape[1] * feature_map])
             else:
                 # <akara>: stack more RNN layer after that
-                # 5D Tensor [n_example/n_steps, n_steps, n_hidden]
+                # 5D Tensor [n_example/n_steps, n_steps, h, w, c]
                 self.outputs = tf.reshape(tf.concat(outputs, 1), [-1, n_steps, cell_shape[0],
                                                                   cell_shape[1], feature_map])
 
```
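A small NumPy sketch (assumed example shapes) of what the two `tf.reshape` branches above produce; note that despite the "4D Tensor" comment, the `return_seq_2d=True` branch flattens each step to `h*w*c` so a DenseLayer can consume it:

```python
import numpy as np

batch, n_steps, h, w, c = 4, 5, 32, 32, 8
outputs = np.zeros((batch, n_steps, h, w, c))  # stacked cell outputs

# return_seq_2d=True: each step flattened to h*w*c for a DenseLayer,
# mirroring reshape(..., [-1, cell_shape[0] * cell_shape[1] * feature_map])
seq_flat = outputs.reshape(-1, h * w * c)       # (batch * n_steps, h * w * c)

# return_seq_2d=False: 5D layout for stacking another ConvLSTM,
# mirroring reshape(..., [-1, n_steps, cell_shape[0], cell_shape[1], feature_map])
seq_5d = outputs.reshape(-1, n_steps, h, w, c)  # (batch, n_steps, h, w, c)
```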