@@ -700,9 +700,9 @@ def __init__(
         print("  learning_rate: %f" % learning_rate)

         # Mean-square-error, i.e. quadratic cost
-        mse = tf.reduce_sum(tf.squared_difference(y, x_recon), reduction_indices=1)
+        mse = tf.reduce_sum(tf.squared_difference(y, x_recon), 1)
         mse = tf.reduce_mean(mse)  # in theano: mse = ((y - x) ** 2).sum(axis=1).mean()
-        # mse = tf.reduce_mean(tf.reduce_sum(tf.square(tf.sub(y, x_recon)), reduction_indices=1))
+        # mse = tf.reduce_mean(tf.reduce_sum(tf.square(tf.sub(y, x_recon)), 1))
         # mse = tf.reduce_mean(tf.squared_difference(y, x_recon))  # <haodong>: Error
         # mse = tf.sqrt(tf.reduce_mean(tf.square(y - x_recon)))  # <haodong>: Error
         # Cross-entropy
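Both `reduce` calls above drop the keyword and pass the axis positionally, which works on TF 0.12 (keyword `reduction_indices`) and TF 1.0 (keyword `axis`) alike. As a minimal sketch of the quadratic cost being built here, assuming `y` and `x_recon` are hypothetical `[batch_size, n_features]` float tensors:

    # Sketch of the quadratic cost above; y and x_recon are hypothetical
    # [batch_size, n_features] float32 tensors.
    import tensorflow as tf

    def quadratic_cost(y, x_recon):
        # Sum squared differences over features (positional axis=1), then
        # average over the batch; runs unchanged on TF 0.12 and TF 1.0.
        per_example = tf.reduce_sum(tf.squared_difference(y, x_recon), 1)
        return tf.reduce_mean(per_example)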
@@ -719,13 +719,16 @@ def __init__(
         # L1 of activation outputs
         activation_out = self.all_layers[-2]
         L1_a = 0.001 * tf.reduce_mean(activation_out)  # <haodong>: theano: T.mean( self.a[i] )  # some neurons are broken, white and black
-        # L1_a = 0.001 * tf.reduce_mean( tf.reduce_sum(activation_out, reduction_indices=0) )  # <haodong>: some neurons are broken, white and black
-        # L1_a = 0.001 * 100 * tf.reduce_mean( tf.reduce_sum(activation_out, reduction_indices=1) )  # <haodong>: some neurons are broken, white and black
+        # L1_a = 0.001 * tf.reduce_mean( tf.reduce_sum(activation_out, 0) )  # <haodong>: some neurons are broken, white and black
+        # L1_a = 0.001 * 100 * tf.reduce_mean( tf.reduce_sum(activation_out, 1) )  # <haodong>: some neurons are broken, white and black
         # KL Divergence
         beta = 4
         rho = 0.15
-        p_hat = tf.reduce_mean(activation_out, reduction_indices=0)  # theano: p_hat = T.mean( self.a[i], axis=0 )
-        KLD = beta * tf.reduce_sum( rho * tf.log(tf.div(rho, p_hat)) + (1 - rho) * tf.log((1 - rho) / (tf.sub(float(1), p_hat))) )
+        p_hat = tf.reduce_mean(activation_out, 0)  # theano: p_hat = T.mean( self.a[i], axis=0 )
+        try:  ## TF1.0
+            KLD = beta * tf.reduce_sum( rho * tf.log(tf.divide(rho, p_hat)) + (1 - rho) * tf.log((1 - rho) / (tf.subtract(float(1), p_hat))) )
+        except AttributeError:  ## TF0.12
+            KLD = beta * tf.reduce_sum( rho * tf.log(tf.div(rho, p_hat)) + (1 - rho) * tf.log((1 - rho) / (tf.sub(float(1), p_hat))) )
         # KLD = beta * tf.reduce_sum( rho * tf.log(rho / p_hat) + (1 - rho) * tf.log((1 - rho) / (1 - p_hat)) )
         # theano: L1_a = l1_a[i] * T.sum( rho[i] * T.log(rho[i]/ p_hat) + (1- rho[i]) * T.log((1- rho[i])/(1- p_hat)) )
         # Total cost
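For readers following the sparsity math: `p_hat` is the mean activation of each hidden unit over the batch, and the KL term penalizes its divergence from the target sparsity `rho`. A version-agnostic sketch of the same penalty using plain tensor operators (`/` and `-`), which both TF 0.12 and TF 1.0 overload, so no try/except is needed; `activation_out` is assumed to be a `[batch_size, n_hidden]` tensor of sigmoid activations in (0, 1):

    # Sketch of the sparsity penalty with operator overloading instead of
    # named ops; activation_out is a hypothetical [batch_size, n_hidden]
    # tensor of sigmoid activations.
    beta, rho = 4.0, 0.15
    p_hat = tf.reduce_mean(activation_out, 0)  # mean activation per hidden unit
    KLD = beta * tf.reduce_sum(rho * tf.log(rho / p_hat)
                               + (1.0 - rho) * tf.log((1.0 - rho) / (1.0 - p_hat)))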
@@ -3223,7 +3226,10 @@ def __init__(
         self.bw_initial_state = bw_initial_state
         # exit()
         # Feedforward to MultiRNNCell
-        list_rnn_inputs = tf.unpack(self.inputs, axis=1)
+        try:  ## TF1.0
+            list_rnn_inputs = tf.unstack(self.inputs, axis=1)
+        except AttributeError:  ## TF0.12
+            list_rnn_inputs = tf.unpack(self.inputs, axis=1)
         outputs, fw_state, bw_state = tf.nn.bidirectional_rnn(
             cell_fw=self.fw_cell,
             cell_bw=self.bw_cell,
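This try/except probe recurs for every renamed op in this commit. A hedged alternative (not what the commit does) is to resolve each rename once at import time and reuse the alias at every call site:

    # Hypothetical module-level alias: resolve the rename once instead of
    # wrapping every call site in try/except.
    unstack = tf.unstack if hasattr(tf, 'unstack') else tf.unpack  # TF1.0 / TF0.12

    # [batch_size, n_steps, n_in] -> a Python list of n_steps tensors.
    list_rnn_inputs = unstack(self.inputs, axis=1)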
@@ -3340,8 +3346,12 @@ def retrieve_seq_length_op(data):
     - Borrowed from `TFLearn <https://github.com/tflearn/tflearn/blob/master/tflearn/layers/recurrent.py>`_.
     """
     with tf.name_scope('GetLength'):
-        used = tf.sign(tf.reduce_max(tf.abs(data), reduction_indices=2))
-        length = tf.reduce_sum(used, reduction_indices=1)
+        ## TF 1.0 renamed reduction_indices to axis; the positional form works on both
+        used = tf.sign(tf.reduce_max(tf.abs(data), 2))
+        length = tf.reduce_sum(used, 1)
+        ## TF < 1.0
+        # used = tf.sign(tf.reduce_max(tf.abs(data), reduction_indices=2))
+        # length = tf.reduce_sum(used, reduction_indices=1)
         length = tf.cast(length, tf.int32)
     return length

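For intuition, a usage sketch with hypothetical data: the op marks each time step whose feature vector is not all-zero (abs, then max over features, then sign) and sums the marks per example, so zero-padded steps are not counted:

    # Usage sketch: batch of 2 sequences, max 3 steps, 2 features each.
    import numpy as np
    import tensorflow as tf

    data = tf.constant(np.array(
        [[[1., 2.], [3., 4.], [0., 0.]],   # real length 2 (last step is padding)
         [[5., 6.], [0., 0.], [0., 0.]]],  # real length 1
        dtype=np.float32))
    length = retrieve_seq_length_op(data)
    # sess.run(length) -> array([2, 1], dtype=int32)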
@@ -3535,8 +3545,12 @@ def __init__(

         # Computes sequence_length
         if sequence_length is None:
-            sequence_length = retrieve_seq_length_op(
-                self.inputs if isinstance(self.inputs, tf.Tensor) else tf.pack(self.inputs))
+            try:  ## TF1.0
+                sequence_length = retrieve_seq_length_op(
+                    self.inputs if isinstance(self.inputs, tf.Tensor) else tf.stack(self.inputs))
+            except AttributeError:  ## TF0.12
+                sequence_length = retrieve_seq_length_op(
+                    self.inputs if isinstance(self.inputs, tf.Tensor) else tf.pack(self.inputs))

         # Main - Computes outputs and last_states
         with tf.variable_scope(name, initializer=initializer) as vs:
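One caveat: this automatic fallback only measures lengths correctly when padding steps are exact zero vectors. With any other padding value, pass `sequence_length` explicitly. A hedged sketch, assuming the constructor arguments (`cell_fn`, `n_hidden`, `sequence_length`) this layer exposes elsewhere in the file:

    # Sketch: supply sequence_length yourself when padding is non-zero.
    # seq_len is a hypothetical int32 placeholder, one length per example.
    seq_len = tf.placeholder(tf.int32, shape=[None], name='seq_len')
    net = DynamicRNNLayer(net,
                          cell_fn=tf.nn.rnn_cell.BasicLSTMCell,  # tf.contrib.rnn.BasicLSTMCell on TF1.0
                          n_hidden=64,
                          sequence_length=seq_len,
                          name='dynamic_rnn')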
@@ -3554,7 +3568,7 @@ def __init__(
         # Manage the outputs
         if return_last:
             # [batch_size, n_hidden]
-            # outputs = tf.transpose(tf.pack(outputs), [1, 0, 2])
+            # outputs = tf.transpose(tf.pack(outputs), [1, 0, 2])  # TF1.0: tf.pack --> tf.stack
             self.outputs = advanced_indexing_op(outputs, sequence_length)
         else:
             # [batch_size, n_step(max), n_hidden]
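`advanced_indexing_op` selects, for each example, the output at its last valid time step. A sketch of the usual TFLearn-style implementation of that trick (flatten batch and time, then gather by flat index); this illustrates the idea and is not necessarily the exact body of the helper in this file:

    # Sketch: gather each example's last valid output.
    # outputs: [batch_size, max_steps, n_hidden]; length: int32 [batch_size].
    def last_relevant(outputs, length):
        batch_size = tf.shape(outputs)[0]
        max_steps = tf.shape(outputs)[1]
        n_hidden = int(outputs.get_shape()[2])
        index = tf.range(0, batch_size) * max_steps + (length - 1)  # flat index of each last step
        flat = tf.reshape(outputs, [-1, n_hidden])  # [batch*steps, n_hidden]
        return tf.gather(flat, index)               # [batch_size, n_hidden]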
@@ -3742,8 +3756,12 @@ def __init__(
         self.bw_initial_state = bw_initial_state
         # Computes sequence_length
         if sequence_length is None:
-            sequence_length = retrieve_seq_length_op(
-                self.inputs if isinstance(self.inputs, tf.Tensor) else tf.pack(self.inputs))
+            try:  ## TF1.0
+                sequence_length = retrieve_seq_length_op(
+                    self.inputs if isinstance(self.inputs, tf.Tensor) else tf.stack(self.inputs))
+            except AttributeError:  ## TF0.12
+                sequence_length = retrieve_seq_length_op(
+                    self.inputs if isinstance(self.inputs, tf.Tensor) else tf.pack(self.inputs))

         outputs, (states_fw, states_bw) = tf.nn.bidirectional_dynamic_rnn(
             cell_fw=self.fw_cell,
@@ -4230,7 +4248,7 @@ class ElementwiseLayer(Layer):
     layer : a list of :class:`Layer` instances
         The `Layer` class feeding into this layer.
     combine_fn : a TensorFlow element-wise merge function
-        e.g. AND is ``tf.minimum``; OR is ``tf.maximum``; ADD is ``tf.add``; MUL is ``tf.mul``, and so on.
+        e.g. AND is ``tf.minimum``; OR is ``tf.maximum``; ADD is ``tf.add``; MUL is ``tf.multiply``, and so on.
         See `TensorFlow Math API <https://www.tensorflow.org/versions/master/api_docs/python/math_ops.html#math>`_.
     name : a string or None
         An optional name to attach to this layer.
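A short usage sketch; `net_a` and `net_b` are hypothetical layers with identical output shapes:

    # Sketch: element-wise minimum of two branches (acts as logical AND for
    # {0, 1} activations); net_a and net_b are hypothetical Layer instances.
    net = ElementwiseLayer(layer=[net_a, net_b],
                           combine_fn=tf.minimum,
                           name='and_merge')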
@@ -4302,7 +4320,7 @@ def __init__(

         print("  tensorlayer:Instantiate ExpandDimsLayer %s" % self.name)
         with tf.variable_scope(name) as vs:
-            try:  # TF12
+            try:  # TF12, TF1.0
                 self.outputs = tf.expand_dims(self.inputs, axis=axis)
             except:  # TF11
                 self.outputs = tf.expand_dims(self.inputs, dim=axis)
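The keyword here was renamed `dim` -> `axis` in TF 0.12 and kept in TF 1.0. A hedged alternative to the probe is to pass the axis positionally, which these releases all accept:

    # Sketch: a positional axis sidesteps the dim/axis keyword rename.
    # e.g. a [batch, n] input with axis=1 becomes [batch, 1, n].
    self.outputs = tf.expand_dims(self.inputs, axis)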
@@ -4451,7 +4469,10 @@ def __init__(
         # with tf.name_scope(name) as scope:
         with tf.variable_scope(name) as vs:
             alphas = tf.get_variable(name='alphas', shape=w_shape, initializer=a_init, **a_init_args)
-            self.outputs = tf.nn.relu(self.inputs) + tf.mul(alphas, (self.inputs - tf.abs(self.inputs))) * 0.5
+            try:  ## TF 1.0
+                self.outputs = tf.nn.relu(self.inputs) + tf.multiply(alphas, (self.inputs - tf.abs(self.inputs))) * 0.5
+            except AttributeError:  ## TF 0.12
+                self.outputs = tf.nn.relu(self.inputs) + tf.mul(alphas, (self.inputs - tf.abs(self.inputs))) * 0.5

         self.all_layers = list(layer.all_layers)
         self.all_params = list(layer.all_params)
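The expression implements PReLU without a branch: since (x - |x|) / 2 equals min(0, x), the output is max(0, x) + alpha * min(0, x). A quick numeric check with hypothetical values:

    # Numeric check of the PReLU identity above, in plain NumPy:
    import numpy as np
    x, alpha = np.array([-2.0, 3.0]), 0.1
    out = np.maximum(0.0, x) + alpha * (x - np.abs(x)) * 0.5
    # out -> [-0.2, 3.0]: negatives scaled by alpha, positives pass through.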
@@ -4512,7 +4533,7 @@ class MultiplexerLayer(Layer):

     References
     ------------
-    - See ``tf.pack()`` and ``tf.gather()`` at `TensorFlow - Slicing and Joining <https://www.tensorflow.org/versions/master/api_docs/python/array_ops.html#slicing-and-joining>`_
+    - See ``tf.pack()`` (TF0.12) or ``tf.stack()`` (TF1.0), and ``tf.gather()`` at `TensorFlow - Slicing and Joining <https://www.tensorflow.org/versions/master/api_docs/python/array_ops.html#slicing-and-joining>`_
     """
     def __init__(self,
                  layer=[],
@@ -4523,7 +4544,10 @@ def __init__(self,
         self.inputs = []
         for l in layer:
             self.inputs.append(l.outputs)
-        all_inputs = tf.pack(self.inputs, name=name)  # pack means concat a list of tensors in a new dim  # 1.2
+        try:  ## TF1.0
+            all_inputs = tf.stack(self.inputs, name=name)  # stack means concat a list of tensors in a new dim  # 1.2
+        except AttributeError:  ## TF0.12
+            all_inputs = tf.pack(self.inputs, name=name)  # pack means concat a list of tensors in a new dim  # 1.2

         print("  tensorlayer:Instantiate MultiplexerLayer %s: n_inputs: %d" % (self.name, self.n_inputs))
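The mechanism: stacking joins the n_inputs tensors along a new leading axis, and `tf.gather` with a scalar index then selects one branch at run time. A sketch with hypothetical inputs:

    # Sketch of the multiplexing idea: pick one of two branches at run time.
    # net1.outputs and net2.outputs are hypothetical [batch, n] tensors.
    sel = tf.placeholder(tf.int32, shape=[], name='sel')
    stacked = tf.stack([net1.outputs, net2.outputs])  # [2, batch, n] (tf.pack on TF0.12)
    chosen = tf.gather(stacked, sel)                  # [batch, n], the sel-th branch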
45294553