@@ -113,18 +113,18 @@ def read_and_decode(filename, is_train=None):
         # 4. Randomly change contrast.
         img = tf.image.random_contrast(img, lower=0.2, upper=1.8)
         # 5. Subtract off the mean and divide by the variance of the pixels.
-        try:  # TF12
+        try:  # TF 0.12+
             img = tf.image.per_image_standardization(img)
-        except:  #earlier TF versions
+        except:  # earlier TF versions
             img = tf.image.per_image_whitening(img)
 
     elif is_train == False:
         # 1. Crop the central [height, width] of the image.
         img = tf.image.resize_image_with_crop_or_pad(img, 24, 24)
         # 2. Subtract off the mean and divide by the variance of the pixels.
-        try:  # TF12
+        try:  # TF 0.12+
             img = tf.image.per_image_standardization(img)
-        except:  #earlier TF versions
+        except:  # earlier TF versions
             img = tf.image.per_image_whitening(img)
     elif is_train == None:
         img = img
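A side note on the try/except pattern above: the bare `except` also swallows errors unrelated to the missing API. A minimal sketch of an explicit version gate instead (assuming only the standard `tf.__version__` string; illustrative, not part of this commit):

    from distutils.version import LooseVersion

    # Pick the standardization op by TF version instead of catching
    # an arbitrary exception.
    if LooseVersion(tf.__version__) >= LooseVersion('0.12'):
        img = tf.image.per_image_standardization(img)  # TF 0.12+
    else:
        img = tf.image.per_image_whitening(img)  # earlier TF versions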
@@ -201,19 +201,22 @@ def inference(x_crop, y_, reuse):
     network = tl.layers.InputLayer(x_crop, name='input')
     network = tl.layers.Conv2dLayer(network, act=tf.nn.relu,
             shape=[5, 5, 3, 64], strides=[1, 1, 1, 1], padding='SAME',  # 64 features for each 5x5x3 patch
-            W_init=W_init, b_init=b_init, name='cnn1')  # output: (batch_size, 24, 24, 64)
+            W_init=W_init, b_init=b_init, name='cnn_layer1')  # output: (batch_size, 24, 24, 64)
     network = tl.layers.PoolLayer(network, ksize=[1, 3, 3, 1],
             strides=[1, 2, 2, 1], padding='SAME',
             pool=tf.nn.max_pool, name='pool1',)  # output: (batch_size, 12, 12, 64)
-    # you can also use tl.layers.LocalResponseNormLayer
-    network.outputs = tf.nn.lrn(network.outputs, 4, bias=1.0, alpha=0.001 / 9.0,
-            beta=0.75, name='norm1')
+    # network.outputs = tf.nn.lrn(network.outputs, 4, bias=1.0, alpha=0.001 / 9.0,
+    #         beta=0.75, name='norm1')
+    network = tl.layers.LocalResponseNormLayer(network, depth_radius=4, bias=1.0,
+            alpha=0.001 / 9.0, beta=0.75, name='norm1')
 
     network = tl.layers.Conv2dLayer(network, act=tf.nn.relu,
             shape=[5, 5, 64, 64], strides=[1, 1, 1, 1], padding='SAME',  # 64 features for each 5x5 patch
             W_init=W_init, b_init=b_init, name='cnn2')  # output: (batch_size, 12, 12, 64)
-    network.outputs = tf.nn.lrn(network.outputs, 4, bias=1.0, alpha=0.001 / 9.0,
-            beta=0.75, name='norm2')
+    # network.outputs = tf.nn.lrn(network.outputs, 4, bias=1.0, alpha=0.001 / 9.0,
+    #         beta=0.75, name='norm2')
+    network = tl.layers.LocalResponseNormLayer(network, depth_radius=4, bias=1.0,
+            alpha=0.001 / 9.0, beta=0.75, name='norm2')
     network = tl.layers.PoolLayer(network, ksize=[1, 3, 3, 1],
             strides=[1, 2, 2, 1], padding='SAME',
             pool=tf.nn.max_pool, name='pool2')  # output: (batch_size, 6, 6, 64)
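This hunk replaces the raw assignment to `network.outputs` with the dedicated `LocalResponseNormLayer`, so the normalization op is tracked as a layer in the network graph instead of bypassing it. For a TF op that has no dedicated wrapper, TensorLayer's `LambdaLayer` offers the same benefit; a hedged sketch (the `norm1_lambda` name is hypothetical, not from this commit):

    # Wrap an arbitrary TF op as a layer instead of mutating
    # network.outputs in place.
    network = tl.layers.LambdaLayer(
        network,
        fn=lambda x: tf.nn.lrn(x, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75),
        name='norm1_lambda')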
@@ -252,35 +255,35 @@ def inference_batch_norm(x_crop, y_, reuse, is_train):
     b_init2 = tf.constant_initializer(value=0.1)
     with tf.variable_scope("model", reuse=reuse):
         tl.layers.set_name_reuse(reuse)
-        network = tl.layers.InputLayer(x_crop, name='input_layer')
+        network = tl.layers.InputLayer(x_crop, name='input')
 
         network = tl.layers.Conv2dLayer(network, act=tf.identity,
                 shape=[5, 5, 3, 64], strides=[1, 1, 1, 1], padding='SAME',  # 64 features for each 5x5x3 patch
-                W_init=W_init, b_init=None, name='cnn_layer1')  # output: (batch_size, 24, 24, 64)
+                W_init=W_init, b_init=None, name='cnn1')  # output: (batch_size, 24, 24, 64)
         network = tl.layers.BatchNormLayer(network, is_train=is_train,
-                act=tf.nn.relu, name='batch_norm1')
+                act=tf.nn.relu, name='batch1')
         network = tl.layers.PoolLayer(network, ksize=[1, 3, 3, 1],
                 strides=[1, 2, 2, 1], padding='SAME',
-                pool=tf.nn.max_pool, name='pool_layer1',)  # output: (batch_size, 12, 12, 64)
+                pool=tf.nn.max_pool, name='pool1',)  # output: (batch_size, 12, 12, 64)
 
         network = tl.layers.Conv2dLayer(network, act=tf.identity,
                 shape=[5, 5, 64, 64], strides=[1, 1, 1, 1], padding='SAME',  # 64 features for each 5x5 patch
-                W_init=W_init, b_init=None, name='cnn_layer2')  # output: (batch_size, 12, 12, 64)
+                W_init=W_init, b_init=None, name='cnn2')  # output: (batch_size, 12, 12, 64)
         network = tl.layers.BatchNormLayer(network, is_train=is_train,
-                act=tf.nn.relu, name='batch_norm2')
+                act=tf.nn.relu, name='batch2')
         network = tl.layers.PoolLayer(network, ksize=[1, 3, 3, 1],
                 strides=[1, 2, 2, 1], padding='SAME',
-                pool=tf.nn.max_pool, name='pool_layer2')  # output: (batch_size, 6, 6, 64)
+                pool=tf.nn.max_pool, name='pool2')  # output: (batch_size, 6, 6, 64)
 
-        network = tl.layers.FlattenLayer(network, name='flatten_layer')  # output: (batch_size, 2304)
+        network = tl.layers.FlattenLayer(network, name='flatten')  # output: (batch_size, 2304)
         network = tl.layers.DenseLayer(network, n_units=384, act=tf.nn.relu,
                 W_init=W_init2, b_init=b_init2, name='relu1')  # output: (batch_size, 384)
         network = tl.layers.DenseLayer(network, n_units=192, act=tf.nn.relu,
                 W_init=W_init2, b_init=b_init2, name='relu2')  # output: (batch_size, 192)
         network = tl.layers.DenseLayer(network, n_units=10, act=tf.identity,
                 W_init=tf.truncated_normal_initializer(stddev=1 / 192.0),
                 b_init=tf.constant_initializer(value=0.0),
-                name='output_layer')  # output: (batch_size, 10)
+                name='output')  # output: (batch_size, 10)
         y = network.outputs
 
         ce = tl.cost.cross_entropy(y, y_, name='cost')
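For context, `inference_batch_norm` is meant to be built twice over shared variables: once with `is_train=True`, so `BatchNormLayer` uses batch statistics and updates its moving averages, and once with `is_train=False` for evaluation. A usage sketch under assumptions (the function's return values are not shown in this diff, and `x_train_batch` etc. are hypothetical batch tensors):

    # One set of weights, two graphs: training and evaluation.
    network, cost, acc = inference_batch_norm(x_train_batch, y_train_batch,
                                              reuse=None, is_train=True)
    _, cost_test, acc_test = inference_batch_norm(x_test_batch, y_test_batch,
                                                  reuse=True, is_train=False)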