@@ -221,7 +221,7 @@ def get_variables_with_name(name=None, train_only=True, printable=False):
     else:
         try:  # TF1.0+
             t_vars = tf.global_variables()
-        except:  # TF0.12
+        except Exception:  # TF0.12
             t_vars = tf.all_variables()
 
     d_vars = [var for var in t_vars if name in var.name]
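Note on the recurring change in this commit (a sketch, not part of the diff): a bare except also swallows KeyboardInterrupt and SystemExit, while except Exception keeps the TF version fallback but still lets those propagate.

    # Minimal illustration of the fallback pattern, assuming only that the two
    # calls exist in the releases the source comments label TF1.0+ and TF0.12.
    import tensorflow as tf

    try:
        all_vars = tf.global_variables()   # TF 1.0+
    except Exception:                      # older release labelled TF0.12 in the source
        all_vars = tf.all_variables()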
@@ -418,13 +418,13 @@ def print_layers(self):
     def count_params(self):
         """Return the number of parameters in the network"""
         n_params = 0
-        for i, p in enumerate(self.all_params):
+        for _i, p in enumerate(self.all_params):
             n = 1
             # for s in p.eval().shape:
             for s in p.get_shape():
                 try:
                     s = int(s)
-                except:
+                except Exception:
                     s = 1
                 if s:
                     n = n * s
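For context, a standalone sketch of what count_params computes, assuming only that each parameter exposes get_shape() (not part of the patch):

    def count_params(params):
        """Count scalar entries across a list of TF variables, treating unknown dims as 1."""
        n_params = 0
        for p in params:
            n = 1
            for s in p.get_shape().as_list():   # e.g. [None, 256] -> unknown dim counted as 1
                n *= int(s) if s else 1
            n_params += n
        return n_params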
@@ -610,14 +610,15 @@ def __init__(
         Layer.__init__(self, name=name)
         self.inputs = inputs
         logging.info("Word2vecEmbeddingInputlayer %s: (%d, %d)" % (self.name, vocabulary_size, embedding_size))
+
         # Look up embeddings for inputs.
         # Note: a row of 'embeddings' is the vector representation of a word.
         # for the sake of speed, it is better to slice the embedding matrix
         # instead of transfering a word id to one-hot-format vector and then
         # multiply by the embedding matrix.
         # embed is the outputs of the hidden layer (embedding layer), it is a
         # row vector with 'embedding_size' values.
-        with tf.variable_scope(name) as vs:
+        with tf.variable_scope(name):
             embeddings = tf.get_variable(name='embeddings', shape=(vocabulary_size, embedding_size), initializer=E_init, dtype=D_TYPE, **E_init_args)
             embed = tf.nn.embedding_lookup(embeddings, self.inputs)
             # Construct the variables for the NCE loss (i.e. negative sampling)
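The comment block in this hunk argues that slicing the embedding matrix beats a one-hot multiply; a small sketch of that equivalence with illustrative sizes and names (not part of the patch):

    import tensorflow as tf

    vocab_size, embedding_size = 1000, 128     # illustrative sizes
    word_ids = tf.constant([3, 17, 42])
    emb = tf.get_variable('emb_demo', shape=(vocab_size, embedding_size))

    # Row slicing: result has shape (3, embedding_size)
    fast = tf.nn.embedding_lookup(emb, word_ids)
    # Same values via an explicit one-hot matmul, but it materialises a (3, vocab_size) matrix first
    slow = tf.matmul(tf.one_hot(word_ids, vocab_size), emb)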
@@ -878,7 +879,7 @@ def __init__(
             if b_init is not None:
                 try:
                     b = tf.get_variable(name='b', shape=(n_units), initializer=b_init, dtype=D_TYPE, **b_init_args)
-                except:  # If initializer is a constant, do not specify shape.
+                except Exception:  # If initializer is a constant, do not specify shape.
                     b = tf.get_variable(name='b', initializer=b_init, dtype=D_TYPE, **b_init_args)
                 self.outputs = act(tf.matmul(self.inputs, W) + b)
             else:
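The try/except kept here exists because tf.get_variable rejects an explicit shape when the initializer is itself a tensor; a hedged illustration of the two cases (n_units and the variable names are made up):

    import tensorflow as tf

    n_units = 100
    # Shape-less initializer: pass shape explicitly.
    b1 = tf.get_variable('b1_demo', shape=(n_units,), initializer=tf.constant_initializer(0.1))
    # Tensor initializer already carries a shape: passing shape= would raise,
    # which is what the except branch above falls back from.
    b2 = tf.get_variable('b2_demo', initializer=tf.constant(0.1, shape=[n_units]))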
@@ -982,10 +983,11 @@ def __init__(
         L2_w = tf.contrib.layers.l2_regularizer(lambda_l2_w)(self.train_params[0]) \
             + tf.contrib.layers.l2_regularizer(lambda_l2_w)(self.train_params[2])  # faster than the code below
         # L2_w = lambda_l2_w * tf.reduce_mean(tf.square(self.train_params[0])) + lambda_l2_w * tf.reduce_mean( tf.square(self.train_params[2]))
+
         # DropNeuro
-        P_o = cost.lo_regularizer(0.03)(
-            self.train_params[0])  # + cost.lo_regularizer(0.5)(self.train_params[2]) # <haodong>: if add lo on decoder, no neuron will be broken
-        P_i = cost.li_regularizer(0.03)(self.train_params[0])  # + cost.li_regularizer(0.001)(self.train_params[2])
+        # P_o = cost.lo_regularizer(0.03)(
+        #     self.train_params[0])  # + cost.lo_regularizer(0.5)(self.train_params[2]) # <haodong>: if add lo on decoder, no neuron will be broken
+        # P_i = cost.li_regularizer(0.03)(self.train_params[0])  # + cost.li_regularizer(0.001)(self.train_params[2])
 
         # L1 of activation outputs
         activation_out = self.all_layers[-2]
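For reference, the kept L2_w lines use the regularizer-factory pattern: tf.contrib.layers.l2_regularizer(scale) returns a callable that maps a weight tensor to a scalar penalty. A minimal sketch with made-up names and scale:

    import tensorflow as tf

    reg = tf.contrib.layers.l2_regularizer(0.004)    # scale is illustrative
    W = tf.get_variable('W_demo', shape=(784, 196))
    l2_penalty = reg(W)                              # scalar tensor added into the reconstruction cost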
@@ -1082,7 +1084,7 @@ def pretrain(self, sess, x, X_train, X_val, denoise_name=None, n_epoch=100, batc
                     visualize.draw_weights(
                         self.train_params[0].eval(), second=10, saveable=True, shape=[28, 28], name=save_name + str(epoch + 1), fig_idx=2012)
                     files.save_npz([self.all_params[0]], name=save_name + str(epoch + 1) + '.npz')
-                except:
+                except Exception:
                     raise Exception(
                         "You should change the visualize.W() in ReconLayer.pretrain(), if you want to save the feature images for different dataset")
 