@@ -100,18 +100,19 @@ def model_batch_norm(x, y_, reuse, is_train):
 
         net = FlattenLayer(net, name='flatten')  # output: (batch_size, 2304)
         net = DenseLayer(net, n_units=384, act=tf.nn.relu,
-                W_init=W_init2, b_init=b_init2, name='relu1')  # output: (batch_size, 384)
+                W_init=W_init2, b_init=b_init2, name='d1relu')  # output: (batch_size, 384)
         net = DenseLayer(net, n_units=192, act=tf.nn.relu,
-                W_init=W_init2, b_init=b_init2, name='relu2')  # output: (batch_size, 192)
+                W_init=W_init2, b_init=b_init2, name='d2relu')  # output: (batch_size, 192)
         net = DenseLayer(net, n_units=10, act=tf.identity,
                 W_init=tf.truncated_normal_initializer(stddev=1 / 192.0),
                 name='output')  # output: (batch_size, 10)
         y = net.outputs
 
         ce = tl.cost.cross_entropy(y, y_, name='cost')
         # L2 for the MLP, without this, the accuracy will be reduced by 15%.
-        L2 = tf.contrib.layers.l2_regularizer(0.004)(net.all_params[4]) + \
-             tf.contrib.layers.l2_regularizer(0.004)(net.all_params[6])
+        L2 = 0
+        for p in tl.layers.get_variables_with_name('relu/W', True, True):
+            L2 += tf.contrib.layers.l2_regularizer(0.004)(p)
         cost = ce + L2
 
         correct_prediction = tf.equal(tf.argmax(y, 1), y_)
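
Note on the L2 change: net.all_params[4] and net.all_params[6] are positional indices, so the regularizer silently attaches to the wrong tensors as soon as a layer is added or removed. Renaming the dense layers to 'd1relu' and 'd2relu' keeps the substring 'relu' in both variable scopes, so a single name-based lookup finds both weight matrices. A minimal sketch of the same pattern with the keyword arguments spelled out (TensorLayer 1.x / TensorFlow 1.x; the variable names in the comment are illustrative):

    # collect every trainable variable whose name contains 'relu/W',
    # e.g. 'd1relu/W:0' and 'd2relu/W:0', and add a weight-decay term for each
    L2 = 0
    for W in tl.layers.get_variables_with_name('relu/W', train_only=True, printable=True):
        L2 += tf.contrib.layers.l2_regularizer(0.004)(W)
    cost = ce + L2
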
@@ -152,8 +153,10 @@ def distort_fn(x, is_train=False):
 x = tf.placeholder(tf.float32, shape=[None, 24, 24, 3], name='x')
 y_ = tf.placeholder(tf.int64, shape=[None, ], name='y_')
 
+## using local response normalization
 # network, cost, _ = model(x, y_, False)
 # _, cost_test, acc = model(x, y_, True)
+## you may want to try batch normalization
 network, cost, _ = model_batch_norm(x, y_, False, is_train=True)
 _, cost_test, acc = model_batch_norm(x, y_, True, is_train=False)
 
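
The two calls above follow the usual TF 1.x weight-sharing idiom: the first call builds the training graph and creates the variables (reuse=False, is_train=True, so training-time behaviour such as batch-norm statistics updates is active), while the second reuses the same variables in a separate inference graph (reuse=True, is_train=False). A sketch with the keyword arguments spelled out, assuming model takes the same (x, y_, reuse) signature as model_batch_norm minus is_train:

    ## using local response normalization
    # network, cost, _ = model(x, y_, reuse=False)
    # _, cost_test, acc = model(x, y_, reuse=True)
    ## you may want to try batch normalization
    network, cost, _ = model_batch_norm(x, y_, reuse=False, is_train=True)
    _, cost_test, acc = model_batch_norm(x, y_, reuse=True, is_train=False)
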
@@ -194,7 +197,7 @@ def distort_fn(x, is_train=False):
             # print("   train acc: %f" % (train_acc / n_batch))
             test_loss, test_acc, n_batch = 0, 0, 0
             for X_test_a, y_test_a in tl.iterate.minibatches(
-                    X_test, y_test, batch_size, shuffle=True):
+                    X_test, y_test, batch_size, shuffle=False):
                 X_test_a = tl.prepro.threading_data(X_test_a, fn=distort_fn, is_train=False)  # central crop
                 err, ac = sess.run([cost_test, acc], feed_dict={x: X_test_a, y_: y_test_a})
                 test_loss += err; test_acc += ac; n_batch += 1
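
Switching to shuffle=False makes the evaluation deterministic: tl.iterate.minibatches yields only complete batches, so with shuffle=True a different random remainder of the test set is dropped on every epoch and the reported test metrics jitter between runs. A sketch of the resulting evaluation loop with the averaged metrics printed (the print format is an assumption, not from this commit):

    # deterministic evaluation: same order, same dropped remainder every epoch
    test_loss, test_acc, n_batch = 0, 0, 0
    for X_test_a, y_test_a in tl.iterate.minibatches(X_test, y_test, batch_size, shuffle=False):
        X_test_a = tl.prepro.threading_data(X_test_a, fn=distort_fn, is_train=False)  # central crop only
        err, ac = sess.run([cost_test, acc], feed_dict={x: X_test_a, y_: y_test_a})
        test_loss += err; test_acc += ac; n_batch += 1
    print("   test loss: %f" % (test_loss / n_batch))
    print("   test acc: %f" % (test_acc / n_batch))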