
Commit 6e69b5e

update cifar10 examples, using get_variables_with_name
1 parent 7e516e8 commit 6e69b5e

File tree

2 files changed: +15 additions, -12 deletions

example/tutorial_cifar10.py

mode changed 100755 → 100644
Lines changed: 8 additions & 5 deletions
@@ -100,18 +100,19 @@ def model_batch_norm(x, y_, reuse, is_train):
 
         net = FlattenLayer(net, name='flatten')                            # output: (batch_size, 2304)
         net = DenseLayer(net, n_units=384, act=tf.nn.relu,
-                W_init=W_init2, b_init=b_init2, name='relu1')              # output: (batch_size, 384)
+                W_init=W_init2, b_init=b_init2, name='d1relu')             # output: (batch_size, 384)
         net = DenseLayer(net, n_units=192, act = tf.nn.relu,
-                W_init=W_init2, b_init=b_init2, name='relu2')              # output: (batch_size, 192)
+                W_init=W_init2, b_init=b_init2, name='d2relu')             # output: (batch_size, 192)
         net = DenseLayer(net, n_units=10, act = tf.identity,
                 W_init=tf.truncated_normal_initializer(stddev=1/192.0),
                 name='output')                                             # output: (batch_size, 10)
         y = net.outputs
 
         ce = tl.cost.cross_entropy(y, y_, name='cost')
         # L2 for the MLP, without this, the accuracy will be reduced by 15%.
-        L2 = tf.contrib.layers.l2_regularizer(0.004)(net.all_params[4]) + \
-                tf.contrib.layers.l2_regularizer(0.004)(net.all_params[6])
+        L2 = 0
+        for p in tl.layers.get_variables_with_name('relu/W', True, True):
+            L2 += tf.contrib.layers.l2_regularizer(0.004)(p)
         cost = ce + L2
 
         correct_prediction = tf.equal(tf.argmax(y, 1), y_)
@@ -152,8 +153,10 @@ def distort_fn(x, is_train=False):
 x = tf.placeholder(tf.float32, shape=[None, 24, 24, 3], name='x')
 y_ = tf.placeholder(tf.int64, shape=[None, ], name='y_')
 
+## using local response normalization
 # network, cost, _ = model(x, y_, False)
 # _, cost_test, acc = model(x, y_, True)
+## you may want to try batch normalization
 network, cost, _ = model_batch_norm(x, y_, False, is_train=True)
 _, cost_test, acc = model_batch_norm(x, y_, True, is_train=False)
 
@@ -194,7 +197,7 @@ def distort_fn(x, is_train=False):
         # print(" train acc: %f" % (train_acc/ n_batch))
         test_loss, test_acc, n_batch = 0, 0, 0
         for X_test_a, y_test_a in tl.iterate.minibatches(
-                                    X_test, y_test, batch_size, shuffle=True):
+                                    X_test, y_test, batch_size, shuffle=False):
             X_test_a = tl.prepro.threading_data(X_test_a, fn=distort_fn, is_train=False)   # central crop
             err, ac = sess.run([cost_test, acc], feed_dict={x: X_test_a, y_: y_test_a})
             test_loss += err; test_acc += ac; n_batch += 1
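
Note on the change above: instead of picking the dense-layer weights out of net.all_params by index, the L2 term is now built from a name lookup. A minimal standalone sketch of the same pattern (assuming TensorFlow 1.x with tf.contrib and TensorLayer 1.x; the toy graph below is only for illustration and is not part of the commit):

    import tensorflow as tf
    import tensorlayer as tl

    # toy graph: two dense layers named like the renamed layers in the tutorial
    x = tf.placeholder(tf.float32, shape=[None, 2304], name='x')
    net = tl.layers.InputLayer(x, name='input')
    net = tl.layers.DenseLayer(net, n_units=384, act=tf.nn.relu, name='d1relu')
    net = tl.layers.DenseLayer(net, n_units=192, act=tf.nn.relu, name='d2relu')

    # get_variables_with_name(name, train_only, printable) returns every trainable
    # variable whose name contains the substring 'relu/W' -- here 'd1relu/W:0' and
    # 'd2relu/W:0' -- so the regularizer no longer depends on all_params positions.
    L2 = 0
    for p in tl.layers.get_variables_with_name('relu/W', True, True):
        L2 += tf.contrib.layers.l2_regularizer(0.004)(p)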

example/tutorial_cifar10_tfrecord.py

mode changed 100755 → 100644
Lines changed: 7 additions & 7 deletions
@@ -268,21 +268,21 @@ def model_batch_norm(x_crop, y_, reuse, is_train):
 
         net = FlattenLayer(net, name='flatten')                            # output: (batch_size, 2304)
         net = DenseLayer(net, n_units=384, act=tf.nn.relu,
-                W_init=W_init2, b_init=b_init2, name='relu1')              # output: (batch_size, 384)
+                W_init=W_init2, b_init=b_init2, name='d1relu')             # output: (batch_size, 384)
         net = DenseLayer(net, n_units=192, act = tf.nn.relu,
-                W_init=W_init2, b_init=b_init2, name='relu2')              # output: (batch_size, 192)
+                W_init=W_init2, b_init=b_init2, name='d2relu')             # output: (batch_size, 192)
         net = DenseLayer(net, n_units=10, act = tf.identity,
                 W_init=tf.truncated_normal_initializer(stddev=1/192.0),
                 name='output')                                             # output: (batch_size, 10)
         y = net.outputs
 
         ce = tl.cost.cross_entropy(y, y_, name='cost')
         # L2 for the MLP, without this, the accuracy will be reduced by 15%.
-        L2 = tf.contrib.layers.l2_regularizer(0.004)(net.all_params[4]) + \
-                tf.contrib.layers.l2_regularizer(0.004)(net.all_params[6])
+        L2 = 0
+        for p in tl.layers.get_variables_with_name('relu/W', True, True):
+            L2 += tf.contrib.layers.l2_regularizer(0.004)(p)
         cost = ce + L2
 
-        # correct_prediction = tf.equal(tf.argmax(tf.nn.softmax(y), 1), y_)
         correct_prediction = tf.equal(tf.cast(tf.argmax(y, 1), tf.int32), y_)
         acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
 
@@ -295,10 +295,10 @@ def model_batch_norm(x_crop, y_, reuse, is_train):
 # cost, acc, network = model(x_crop, y_, None)
 
 with tf.device('/gpu:0'):   # <-- remove it if you don't have GPU
-    # network in gpu
+    ## using local response normalization
     network, cost, acc, = model(x_train_batch, y_train_batch, False)
     _, cost_test, acc_test = model(x_test_batch, y_test_batch, True)
-    # you may want to try batch normalization
+    ## you may want to try batch normalization
     # network, cost, acc, = model_batch_norm(x_train_batch, y_train_batch, None, is_train=True)
     # _, cost_test, acc_test = model_batch_norm(x_test_batch, y_test_batch, True, is_train=False)
 
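Why the layers were renamed from 'relu1'/'relu2' to 'd1relu'/'d2relu': in TensorLayer 1.x, get_variables_with_name filters trainable variables by a plain substring match on their names, and 'relu1/W' does not contain 'relu/W' while 'd1relu/W' does, which is presumably what the rename is for. A quick check (hypothetical snippet, not part of the commit):

    # substring test against the old and new variable names
    for name in ['relu1/W:0', 'relu2/W:0', 'd1relu/W:0', 'd2relu/W:0']:
        print(name, 'relu/W' in name)   # -> False, False, True, True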

0 commit comments
