@@ -83,7 +83,9 @@ def data_to_tfrecord(images, labels, filename):
                 feature={
                     "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),
                     'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),
-                }))
+                }
+            )
+        )
         writer.write(example.SerializeToString())  # Serialize To String
     writer.close()
 
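The `}))` that this hunk unfolds closes a nested `tf.train.Example(features=tf.train.Features(feature={...}))` call. For reference, a minimal sketch of the whole record-writing step as it reads after the change; the surrounding loop and the exact indentation are assumptions, since the hunk shows only the closing brackets:

    # One image/label pair serialized into a TFRecord (TF 1.x API).
    example = tf.train.Example(
        features=tf.train.Features(
            feature={
                "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),
                'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),
            }
        )
    )
    writer.write(example.SerializeToString())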
@@ -97,12 +99,13 @@ def read_and_decode(filename, is_train=None):
         serialized_example, features={
             'label': tf.FixedLenFeature([], tf.int64),
             'img_raw': tf.FixedLenFeature([], tf.string),
-        })
+        }
+    )
     # You can do more image distortion here for training data
     img = tf.decode_raw(features['img_raw'], tf.float32)
     img = tf.reshape(img, [32, 32, 3])
     # img = tf.cast(img, tf.float32) #* (1. / 255) - 0.5
-    if is_train == True:
+    if is_train == True:
         # 1. Randomly crop a [height, width] section of the image.
         img = tf.random_crop(img, [24, 24, 3])
         # 2. Randomly flip the image horizontally.
@@ -147,9 +150,12 @@ def read_and_decode(filename, is_train=None):
     x_test_, y_test_ = read_and_decode("test.cifar10", False)
 
     x_train_batch, y_train_batch = tf.train.shuffle_batch(
-        [x_train_, y_train_], batch_size=batch_size, capacity=2000, min_after_dequeue=1000, num_threads=32)  # set the number of threads here
+        [x_train_, y_train_], batch_size=batch_size, capacity=2000, min_after_dequeue=1000, num_threads=32
+    )  # set the number of threads here
     # for testing, uses batch instead of shuffle_batch
-    x_test_batch, y_test_batch = tf.train.batch([x_test_, y_test_], batch_size=batch_size, capacity=50000, num_threads=32)
+    x_test_batch, y_test_batch = tf.train.batch(
+        [x_test_, y_test_], batch_size=batch_size, capacity=50000, num_threads=32
+    )
 
     def model(x_crop, y_, reuse):
         """ For more simplified CNN APIs, check tensorlayer.org """
@@ -161,16 +167,28 @@ def model(x_crop, y_, reuse):
         net = tl.layers.Conv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', W_init=W_init, name='cnn1')
         net = tl.layers.SignLayer(net)
         net = tl.layers.MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool1')
-        net = tl.layers.LocalResponseNormLayer(net, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')
-        net = tl.layers.BinaryConv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', W_init=W_init, name='cnn2')
-        net = tl.layers.LocalResponseNormLayer(net, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
+        net = tl.layers.LocalResponseNormLayer(
+            net, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1'
+        )
+        net = tl.layers.BinaryConv2d(
+            net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', W_init=W_init, name='cnn2'
+        )
+        net = tl.layers.LocalResponseNormLayer(
+            net, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2'
+        )
         net = tl.layers.MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool2')
         net = tl.layers.FlattenLayer(net, name='flatten')  # output: (batch_size, 2304)
         net = tl.layers.SignLayer(net)
-        net = tl.layers.BinaryDenseLayer(net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu')  # output: (batch_size, 384)
+        net = tl.layers.BinaryDenseLayer(
+            net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu'
+        )  # output: (batch_size, 384)
         net = tl.layers.SignLayer(net)
-        net = tl.layers.BinaryDenseLayer(net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu')  # output: (batch_size, 192)
-        net = tl.layers.DenseLayer(net, n_units=10, act=tf.identity, W_init=W_init2, name='output')  # output: (batch_size, 10)
+        net = tl.layers.BinaryDenseLayer(
+            net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu'
+        )  # output: (batch_size, 192)
+        net = tl.layers.DenseLayer(
+            net, n_units=10, act=tf.identity, W_init=W_init2, name='output'
+        )  # output: (batch_size, 10)
         y = net.outputs
 
         ce = tl.cost.cross_entropy(y, y_, name='cost')
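`SignLayer`, `BinaryConv2d`, and `BinaryDenseLayer` in the hunk above give this model its BinaryNet character: activations and weights are constrained to {-1, +1} in the forward pass, while gradients are passed through as if the quantization were the identity (the straight-through estimator). A minimal sketch of that trick as it is usually written in TF 1.x; this is the published technique, not necessarily TensorLayer's exact implementation:

    def binarize(x):
        # Forward value is sign(x); stop_gradient makes the backward pass
        # treat the whole op as identity (straight-through estimator).
        return x + tf.stop_gradient(tf.sign(x) - x)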
@@ -201,9 +219,15 @@ def model_batch_norm(x_crop, y_, reuse, is_train):
         net = tl.layers.BatchNormLayer(net, is_train, act=tf.nn.relu, name='batch2')
         net = tl.layers.MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool2')
         net = tl.layers.FlattenLayer(net, name='flatten')  # output: (batch_size, 2304)
-        net = tl.layers.DenseLayer(net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu')  # output: (batch_size, 384)
-        net = tl.layers.DenseLayer(net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu')  # output: (batch_size, 192)
-        net = tl.layers.DenseLayer(net, n_units=10, act=tf.identity, W_init=W_init2, name='output')  # output: (batch_size, 10)
+        net = tl.layers.DenseLayer(
+            net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu'
+        )  # output: (batch_size, 384)
+        net = tl.layers.DenseLayer(
+            net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu'
+        )  # output: (batch_size, 192)
+        net = tl.layers.DenseLayer(
+            net, n_units=10, act=tf.identity, W_init=W_init2, name='output'
+        )  # output: (batch_size, 10)
         y = net.outputs
 
         ce = tl.cost.cross_entropy(y, y_, name='cost')
@@ -273,7 +297,10 @@ def model_batch_norm(x_crop, y_, reuse, is_train):
                 n_batch += 1
 
             if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
-                print("Epoch %d : Step %d-%d of %d took %fs" % (epoch, step, step + n_step_epoch, n_step, time.time() - start_time))
+                print(
+                    "Epoch %d : Step %d-%d of %d took %fs" %
+                    (epoch, step, step + n_step_epoch, n_step, time.time() - start_time)
+                )
                 print(" train loss: %f" % (train_loss / n_batch))
                 print(" train acc: %f" % (train_acc / n_batch))