@@ -54,26 +54,26 @@ def discriminator(inputs, is_train=True, reuse=False):
5454 df_dim = 64 # Dimension of discrim filters in first conv layer. [64]
5555 w_init = tf .glorot_normal_initializer ()
5656 gamma_init = tf .random_normal_initializer (1. , 0.02 )
57-
57+ lrelu = lambda x : tf . nn . leaky_relu ( x , 0.2 )
5858 with tf .variable_scope ("discriminator" , reuse = reuse ):
59-
59+
6060 net_in = InputLayer (inputs , name = 'd/in' )
61- net_h0 = Conv2d (net_in , df_dim , (5 , 5 ), (2 , 2 ), act = tf . nn . leaky_relu ,
61+ net_h0 = Conv2d (net_in , df_dim , (5 , 5 ), (2 , 2 ), act = lrelu ,
6262 padding = 'SAME' , W_init = w_init , name = 'd/h0/conv2d' )
6363
6464 net_h1 = Conv2d (net_h0 , df_dim * 2 , (5 , 5 ), (2 , 2 ), act = None ,
6565 padding = 'SAME' , W_init = w_init , name = 'd/h1/conv2d' )
66- net_h1 = BatchNormLayer (net_h1 , act = tf . nn . leaky_relu ,
66+ net_h1 = BatchNormLayer (net_h1 , act = lrelu ,
6767 is_train = is_train , gamma_init = gamma_init , name = 'd/h1/batch_norm' )
6868
6969 net_h2 = Conv2d (net_h1 , df_dim * 4 , (5 , 5 ), (2 , 2 ), act = None ,
7070 padding = 'SAME' , W_init = w_init , name = 'd/h2/conv2d' )
71- net_h2 = BatchNormLayer (net_h2 , act = tf . nn . leaky_relu ,
71+ net_h2 = BatchNormLayer (net_h2 , act = lrelu ,
7272 is_train = is_train , gamma_init = gamma_init , name = 'd/h2/batch_norm' )
7373
7474 net_h3 = Conv2d (net_h2 , df_dim * 8 , (5 , 5 ), (2 , 2 ), act = None ,
7575 padding = 'SAME' , W_init = w_init , name = 'd/h3/conv2d' )
76- net_h3 = BatchNormLayer (net_h3 , act = tf . nn . leaky_relu ,
76+ net_h3 = BatchNormLayer (net_h3 , act = lrelu ,
7777 is_train = is_train , gamma_init = gamma_init , name = 'd/h3/batch_norm' )
7878
7979 net_h4 = FlattenLayer (net_h3 , name = 'd/h4/flatten' )