Commit 256fa2c

zsdonghao authored and luomai committed

Fix bugs in examples. (#383)

* fixed-bug-example
* Update tutorial_cifar10_tfrecord.py
* fixed yapf
* fixed yapf

1 parent 0b49359 commit 256fa2c

1 file changed

example/tutorial_cifar10_tfrecord.py

Lines changed: 11 additions & 15 deletions
@@ -44,21 +44,15 @@
 
 # import numpy as np
 import tensorflow as tf
-
 import tensorlayer as tl
 from tensorlayer.layers import *
 
-model_file_name = "model_cifar10_tfrecord.ckpt"
+model_file_name = "./model_cifar10_tfrecord.ckpt"
 resume = False  # load model, resume from previous checkpoint?
 
 ## Download data, and convert to TFRecord format, see ```tutorial_tfrecord.py```
 X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False)
 
-# X_train = np.asarray(X_train, dtype=np.float32)
-# y_train = np.asarray(y_train, dtype=np.int64)
-# X_test = np.asarray(X_test, dtype=np.float32)
-# y_test = np.asarray(y_test, dtype=np.int64)
-
 print('X_train.shape', X_train.shape)  # (50000, 32, 32, 3)
 print('y_train.shape', y_train.shape)  # (50000,)
 print('X_test.shape', X_test.shape)  # (10000, 32, 32, 3)
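
The substantive fix in this hunk is the "./" prefix on the checkpoint path (the unused numpy conversion comments are simply dropped). The commit message does not spell out the failure, but this matches a commonly reported TF 1.x issue: tf.train.Saver.restore() can fail with "Failed to find any matching files" when given a bare relative filename, while an explicitly "./"-prefixed path resolves cleanly. A minimal sketch of the round trip, assuming the TF 1.x API the tutorial targets (the variable is illustrative):

# Minimal sketch (TF 1.x): checkpoint save/restore round trip with the
# "./"-prefixed path used by the fix. The variable here is illustrative.
import tensorflow as tf

model_file_name = "./model_cifar10_tfrecord.ckpt"

w = tf.Variable(tf.zeros([3]), name="w")
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.save(sess, model_file_name)     # writes model_cifar10_tfrecord.ckpt.*
    saver.restore(sess, model_file_name)  # resolves cleanly with the "./" prefix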
@@ -171,7 +165,7 @@ def read_and_decode(filename, is_train=None):
 # sess.close()
 
 batch_size = 128
-model_file_name = "model_cifar10_advanced.ckpt"
+model_file_name = "./model_cifar10_advanced.ckpt"
 resume = False  # load model, resume from previous checkpoint?
 
 with tf.device('/cpu:0'):
@@ -196,7 +190,7 @@ def model(x_crop, y_, reuse):
 net = Conv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', W_init=W_init, name='cnn1')
 # net = Conv2dLayer(net, act=tf.nn.relu, shape=[5, 5, 3, 64],
 #         strides=[1, 1, 1, 1], padding='SAME',  # 64 features for each 5x5x3 patch
-#         W_init=W_init, name ='cnn1')  # output: (batch_size, 24, 24, 64)
+#         W_init=W_init, name ='cnn1')  # output: (batch_size, 24, 24, 64)
 net = MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool1')
 # net = PoolLayer(net, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
 #        padding='SAME', pool = tf.nn.max_pool, name ='pool1',)  # output: (batch_size, 12, 12, 64)
@@ -207,7 +201,7 @@ def model(x_crop, y_, reuse):
 net = Conv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', W_init=W_init, name='cnn2')
 # net = Conv2dLayer(net, act=tf.nn.relu, shape=[5, 5, 64, 64],
 #         strides=[1, 1, 1, 1], padding='SAME',  # 64 features for each 5x5 patch
-#         W_init=W_init, name ='cnn2')  # output: (batch_size, 12, 12, 64)
+#         W_init=W_init, name ='cnn2')  # output: (batch_size, 12, 12, 64)
 net = LocalResponseNormLayer(net, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
 # net.outputs = tf.nn.lrn(net.outputs, 4, bias=1.0, alpha=0.001 / 9.0,
 #               beta=0.75, name='norm2')
@@ -217,8 +211,7 @@ def model(x_crop, y_, reuse):
 net = FlattenLayer(net, name='flatten')  # output: (batch_size, 2304)
 net = DenseLayer(net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu')  # output: (batch_size, 384)
 net = DenseLayer(net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu')  # output: (batch_size, 192)
-net = DenseLayer(
-    net, n_units=10, act=tf.identity, W_init=tf.truncated_normal_initializer(stddev=1 / 192.0), name='output')  # output: (batch_size, 10)
+net = DenseLayer(net, n_units=10, act=tf.identity, W_init=W_init2, name='output')  # output: (batch_size, 10)
 y = net.outputs
 
 ce = tl.cost.cross_entropy(y, y_, name='cost')
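
The real change in this hunk, and in the identical hunk in model_batch_norm below, is not just the one-line reformat: the output layer now reuses W_init2 instead of a one-off tf.truncated_normal_initializer(stddev=1 / 192.0), so the output weights are drawn from a different distribution. A side-by-side sketch; the stddev given to W_init2 here is an assumption for illustration, since the tutorial defines it earlier inside model():

import tensorflow as tf
from tensorlayer.layers import InputLayer, DenseLayer

W_init2 = tf.truncated_normal_initializer(stddev=0.04)  # assumed value

x = tf.placeholder(tf.float32, shape=[None, 192])
net = InputLayer(x, name='input')

# before: a one-off initializer used only by the output layer
# net = DenseLayer(net, n_units=10, act=tf.identity,
#                  W_init=tf.truncated_normal_initializer(stddev=1 / 192.0),
#                  name='output')

# after: the shared initializer, one line, same layer shape
net = DenseLayer(net, n_units=10, act=tf.identity, W_init=W_init2, name='output')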
@@ -264,8 +257,7 @@ def model_batch_norm(x_crop, y_, reuse, is_train):
 net = FlattenLayer(net, name='flatten')  # output: (batch_size, 2304)
 net = DenseLayer(net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu')  # output: (batch_size, 384)
 net = DenseLayer(net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu')  # output: (batch_size, 192)
-net = DenseLayer(
-    net, n_units=10, act=tf.identity, W_init=tf.truncated_normal_initializer(stddev=1 / 192.0), name='output')  # output: (batch_size, 10)
+net = DenseLayer(net, n_units=10, act=tf.identity, W_init=W_init2, name='output')  # output: (batch_size, 10)
 y = net.outputs
 
 ce = tl.cost.cross_entropy(y, y_, name='cost')
@@ -302,7 +294,7 @@ def model_batch_norm(x_crop, y_, reuse, is_train):
 n_step = n_epoch * n_step_epoch
 
 with tf.device('/gpu:0'):  # <-- remove it if you don't have GPU
-    train_op = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08, use_locking=False).minimize(cost)
+    train_op = tf.train.AdamOptimizer(learning_rate).minimize(cost)
 
 tl.layers.initialize_global_variables(sess)
 if resume:
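
The keyword arguments dropped here are exactly the TF 1.x defaults of tf.train.AdamOptimizer (beta1=0.9, beta2=0.999, epsilon=1e-08, use_locking=False), so the shorter call builds an identically configured optimizer. A minimal sketch with a stand-in loss:

import tensorflow as tf

x = tf.Variable(1.0)
cost = tf.square(x)
learning_rate = 0.0001

# Both forms configure Adam identically, since the explicit keywords
# match the TF 1.x defaults; only the graph node names differ.
train_op_a = tf.train.AdamOptimizer(
    learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08,
    use_locking=False).minimize(cost)
train_op_b = tf.train.AdamOptimizer(learning_rate).minimize(cost)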
@@ -352,6 +344,10 @@ def model_batch_norm(x_crop, y_, reuse, is_train):
 print("Save model " + "!" * 10)
 saver = tf.train.Saver()
 save_path = saver.save(sess, model_file_name)
+# you can also save model into npz
+tl.files.save_npz(network.all_params, name='model.npz', sess=sess)
+# and restore it as follow:
+# tl.files.load_and_assign_npz(sess=sess, name='model.npz', network=network)
 
 coord.request_stop()
 coord.join(threads)
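
The added lines save the parameters a second time as a portable npz archive via TensorLayer's tl.files helpers. A self-contained sketch of that round trip, assuming the TensorLayer 1.x API; the tiny stand-in network is illustrative, not the tutorial's CNN:

import tensorflow as tf
import tensorlayer as tl

x = tf.placeholder(tf.float32, shape=[None, 32 * 32 * 3], name='x')
network = tl.layers.InputLayer(x, name='input')
network = tl.layers.DenseLayer(network, n_units=10, name='output')

with tf.Session() as sess:
    tl.layers.initialize_global_variables(sess)
    # save every parameter tensor into a portable npz archive
    tl.files.save_npz(network.all_params, name='model.npz', sess=sess)
    # rebuild the same architecture later, then restore the weights
    tl.files.load_and_assign_npz(sess=sess, name='model.npz', network=network)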
