|
84 | 84 | if net.count_params() != 7160: |
85 | 85 |     raise Exception("params dont match") |
86 | 86 |
|
| 87 | +# n_layer=2 |
| 88 | +net = tl.layers.EmbeddingInputlayer(inputs=input_data, vocabulary_size=vocab_size, embedding_size=hidden_size, name='emb2') |
| 89 | +net = tl.layers.BiRNNLayer( |
| 90 | + net, cell_fn=tf.contrib.rnn.BasicLSTMCell, n_hidden=hidden_size, n_steps=num_steps, n_layer=2, return_last=False, return_seq_2d=False, name='birnn2') |
| 91 | + |
| 92 | +net.print_layers() |
| 93 | +net.print_params(False) |
| 94 | + |
| 95 | +shape = net.outputs.get_shape().as_list() |
| 96 | +if shape[1:3] != [num_steps, hidden_size * 2]: |
| 97 | +    raise Exception("shape dont match") |
| 98 | + |
| 99 | +if len(net.all_layers) != 2: |
| 100 | +    raise Exception("layers dont match") |
| 101 | + |
| 102 | +# 1 embedding matrix + 2 LSTM cells per direction, each with a kernel and a bias |
| 103 | +if len(net.all_params) != 9: |
| 104 | +    raise Exception("params dont match") |
| 105 | + |
| 106 | +# 600 (embedding) + 4 * 3280 (LSTM kernels and biases) = 13720, given vocab_size=30, hidden_size=20 |
| 107 | +if net.count_params() != 13720: |
| 108 | +    raise Exception("params dont match") |
| 109 | + |
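The stacked-BiRNN expectations enabled above can be derived by hand. Below is a minimal sketch, assuming TF 1.x `BasicLSTMCell` variables (one `[n_input + n_hidden, 4 * n_hidden]` kernel plus one `4 * n_hidden` bias per cell) and `vocab_size=30`, `hidden_size=20`; those two values are inferred from the single-layer 7160 figure rather than read from this diff, and the helper names are illustrative, not TensorLayer API:

```python
# Sketch: expected trainable-variable count for embedding + stacked BiRNN.
# Assumes TF 1.x BasicLSTMCell: kernel [n_input + n_hidden, 4 * n_hidden]
# plus bias [4 * n_hidden]; vocab_size=30 / hidden_size=20 are inferred.

def lstm_cell_params(n_input, n_hidden):
    return (n_input + n_hidden) * 4 * n_hidden + 4 * n_hidden

def birnn_count(vocab_size, hidden_size, n_layer):
    total = vocab_size * hidden_size      # embedding matrix
    for _ in range(2):                    # forward and backward stacks
        for _ in range(n_layer):
            # every layer reads a hidden_size-wide input here, because the
            # embedding size equals n_hidden in this test
            total += lstm_cell_params(hidden_size, hidden_size)
    return total

print(birnn_count(30, 20, 1))  # 7160, matching the single-layer check above
print(birnn_count(30, 20, 2))  # 13720, the stacked expectation
```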
87 | 110 | ## ConvLSTMLayer TODO |
88 | 111 | # image_size = 100 |
89 | 112 | # batch_size = 10 |
|
141 | 164 | if net.count_params() != 4510: |
142 | 165 |     raise Exception("params dont match") |
143 | 166 |
|
| 167 | +# n_layer=3 |
| 168 | +nin = tl.layers.EmbeddingInputlayer(inputs=input_seqs, vocabulary_size=vocab_size, embedding_size=embedding_size, name='seq_embedding2') |
| 169 | +rnn = tl.layers.DynamicRNNLayer( |
| 170 | + nin, |
| 171 | + cell_fn=tf.contrib.rnn.BasicLSTMCell, |
| 172 | + n_hidden=embedding_size, |
| 173 | + dropout=(keep_prob if is_train else None), |
| 174 | + sequence_length=tl.layers.retrieve_seq_length_op2(input_seqs), |
| 175 | + n_layer=3, |
| 176 | + return_last=False, |
| 177 | + return_seq_2d=True, |
| 178 | + name='dynamicrnn2') |
| 179 | +net = tl.layers.DenseLayer(rnn, n_units=vocab_size, name="o2") |
| 180 | + |
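Unlike the neighbouring hunks, this `n_layer=3` variant is never verified. Below is a hedged sketch of checks matching the file's own pattern; the structural counts assume one kernel/bias pair per `BasicLSTMCell`, and the 11070 figure assumes `vocab_size=30`, `embedding_size=20` (values inferred from the 4510 and 8390 checks elsewhere in this test, not visible in the diff):

```python
net.print_layers()
net.print_params(False)

# return_seq_2d=True flattens time into the batch, so after the dense layer
# the output is [batch * n_steps, vocab_size]
shape = net.outputs.get_shape().as_list()
if shape[-1] != vocab_size:
    raise Exception("shape dont match")

if len(net.all_layers) != 3:   # embedding + dynamicrnn + dense
    raise Exception("layers dont match")

if len(net.all_params) != 9:   # 1 embedding + 3 cells x (kernel, bias) + dense (W, b)
    raise Exception("params dont match")

# 600 embedding + 3 * 3280 LSTM + 630 dense, if vocab_size=30, embedding_size=20
if net.count_params() != 11070:
    raise Exception("params dont match")
```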
144 | 181 | ## BiDynamic Synced input and output |
145 | 182 | rnn = tl.layers.BiDynamicRNNLayer( |
146 | 183 | nin, |
|
151 | 188 | return_last=False, |
152 | 189 | return_seq_2d=True, |
153 | 190 | name='bidynamicrnn') |
154 | | -net = tl.layers.DenseLayer(rnn, n_units=vocab_size, name="o2") |
| 191 | +net = tl.layers.DenseLayer(rnn, n_units=vocab_size, name="o3") |
155 | 192 |
|
156 | 193 | net.print_layers() |
157 | 194 | net.print_params(False) |
|
173 | 210 | if net.count_params() != 8390: |
174 | 211 |     raise Exception("params dont match") |
175 | 212 |
|
| 213 | +# n_layer=2 |
| 214 | +rnn = tl.layers.BiDynamicRNNLayer( |
| 215 | + nin, |
| 216 | + cell_fn=tf.contrib.rnn.BasicLSTMCell, |
| 217 | + n_hidden=embedding_size, |
| 218 | + dropout=(keep_prob if is_train else None), |
| 219 | +    sequence_length=tl.layers.retrieve_seq_length_op2(input_seqs), |
| 220 | +    n_layer=2, return_last=False, |
| 221 | + return_seq_2d=True, |
| 222 | + name='bidynamicrnn2') |
| 223 | +net = tl.layers.DenseLayer(rnn, n_units=vocab_size, name="o4") |
| 224 | + |
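The stacked `BiDynamicRNNLayer` hunk above is likewise unchecked. Under the same assumptions as the sketch earlier (one kernel/bias per cell, `vocab_size=30`, `embedding_size=20` inferred, not confirmed), the analogous expectations would be:

```python
if len(net.all_params) != 11:  # 1 embedding + 2 cells per direction x (kernel, bias) + dense (W, b)
    raise Exception("params dont match")

# 600 + 4 * 3280 + 1230 (dense reads the 2 * embedding_size concat) = 14950
if net.count_params() != 14950:
    raise Exception("params dont match")
```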
176 | 225 | ## Seq2Seq |
177 | 226 | from tensorlayer.layers import EmbeddingInputlayer, Seq2Seq, retrieve_seq_length_op2, DenseLayer |
178 | 227 | batch_size = 32 |
|
198 | 247 | decode_sequence_length=retrieve_seq_length_op2(decode_seqs), |
199 | 248 | initial_state_encode=None, |
200 | 249 | dropout=None, |
201 | | - n_layer=1, |
| 250 | + n_layer=2, |
202 | 251 | return_seq_2d=True, |
203 | 252 | name='Seq2seq') |
204 | 253 | net = DenseLayer(net, n_units=10000, act=tf.identity, name='oo') |
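For the Seq2Seq hunk, bumping `n_layer` from 1 to 2 stacks one extra `BasicLSTMCell` on both the encoder and the decoder, i.e. four extra variables (two kernels, two biases). A rough sketch of the resulting growth in trainable weights, hedged because the embedding size this test uses is not visible in the diff and the function name is illustrative:

```python
# Extra weights from raising n_layer by `added_layers` in TL's Seq2Seq,
# assuming TF 1.x BasicLSTMCell and that stacked layers read the n_hidden
# output of the layer below.
def extra_seq2seq_weights(n_hidden, added_layers=1):
    per_cell = (n_hidden + n_hidden) * 4 * n_hidden + 4 * n_hidden
    return 2 * added_layers * per_cell  # one new cell each for encoder and decoder
```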