@@ -196,31 +196,33 @@ def inference(x, is_training, num_steps, reuse=None):
196196 """
197197 print ("\n num_steps : %d, is_training : %s, reuse : %s" %
198198 (num_steps , is_training , reuse ))
199- initializer = tf .random_uniform_initializer (init_scale , init_scale )
199+ initializer = tf .random_uniform_initializer (- init_scale , init_scale )
200200 with tf .variable_scope ("model" , reuse = reuse ):
201201 tl .layers .set_name_reuse (reuse )
202202 network = tl .layers .EmbeddingInputlayer (
203- inputs = x ,
204- vocabulary_size = vocab_size ,
205- embedding_size = hidden_size ,
206- E_init = tf .random_uniform_initializer (- init_scale , init_scale ),
207- name = 'embedding_layer' )
208- network = tl .layers .DropoutLayer (network , keep = keep_prob , is_fix = True , is_train = is_training , name = 'drop1' )
203+ inputs = x ,
204+ vocabulary_size = vocab_size ,
205+ embedding_size = hidden_size ,
206+ E_init = initializer ,
207+ name = 'embedding' )
208+ network = tl .layers .DropoutLayer (network , keep = keep_prob ,
209+ is_fix = True , is_train = is_training , name = 'drop1' )
209210 network = tl .layers .RNNLayer (network ,
210211 cell_fn = tf .contrib .rnn .BasicLSTMCell ,#tf.nn.rnn_cell.BasicLSTMCell,
211212 cell_init_args = {'forget_bias' : 0.0 },# 'state_is_tuple': True},
212213 n_hidden = hidden_size ,
213- initializer = tf . random_uniform_initializer ( - init_scale , init_scale ) ,
214+ initializer = initializer ,
214215 n_steps = num_steps ,
215216 return_last = False ,
216217 name = 'basic_lstm_layer1' )
217218 lstm1 = network
218- network = tl .layers .DropoutLayer (network , keep = keep_prob , is_fix = True , is_train = is_training , name = 'drop2' )
219+ network = tl .layers .DropoutLayer (network , keep = keep_prob ,
220+ is_fix = True , is_train = is_training , name = 'drop2' )
219221 network = tl .layers .RNNLayer (network ,
220222 cell_fn = tf .contrib .rnn .BasicLSTMCell ,#tf.nn.rnn_cell.BasicLSTMCell,
221223 cell_init_args = {'forget_bias' : 0.0 }, # 'state_is_tuple': True},
222224 n_hidden = hidden_size ,
223- initializer = tf . random_uniform_initializer ( - init_scale , init_scale ) ,
225+ initializer = initializer ,
224226 n_steps = num_steps ,
225227 return_last = False ,
226228 return_seq_2d = True ,
@@ -230,12 +232,11 @@ def inference(x, is_training, num_steps, reuse=None):
         # you can reshape the outputs as follow:
         # network = tl.layers.ReshapeLayer(network,
         #     shape=[-1, int(network.outputs._shape[-1])], name='reshape')
-        network = tl.layers.DropoutLayer(network, keep=keep_prob, is_fix=True, is_train=is_training, name='drop3')
-        network = tl.layers.DenseLayer(network,
-            n_units=vocab_size,
-            W_init=tf.random_uniform_initializer(-init_scale, init_scale),
-            b_init=tf.random_uniform_initializer(-init_scale, init_scale),
-            act=tf.identity, name='output_layer')
+        network = tl.layers.DropoutLayer(network, keep=keep_prob,
+            is_fix=True, is_train=is_training, name='drop3')
+        network = tl.layers.DenseLayer(network, n_units=vocab_size,
+            W_init=initializer, b_init=initializer,
+            act=tf.identity, name='output')
     return network, lstm1, lstm2

 # Inference for Training
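
For reference, the refactored inference() is built twice over the same weights: once for training (which creates the variables) and once for evaluation with reuse=True, which shares those variables and, via is_training=False, keeps the fixed dropout layers disabled. A minimal sketch of that calling pattern, assuming the placeholder and hyper-parameter names (input_data, batch_size, num_steps) from the surrounding script:

    # Illustrative sketch, not part of this commit; names are assumptions.
    input_data = tf.placeholder(tf.int32, [batch_size, num_steps])
    # Build the training graph first (creates the model variables).
    network, lstm1, lstm2 = inference(input_data, is_training=True,
                                      num_steps=num_steps, reuse=None)
    # Rebuild the same model for validation/testing; reuse=True shares the
    # variables created above instead of allocating new ones.
    network_val, lstm1_val, lstm2_val = inference(input_data, is_training=False,
                                                  num_steps=num_steps, reuse=True)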