Commit f53ec4a

[maintain] for TF 0.12.1, check examples

1 parent: 3e41a09
20 files changed: +275 -201 lines

README.md

Lines changed: 2 additions & 2 deletions
@@ -92,8 +92,8 @@ train_params = network.all_params
 train_op = tf.train.AdamOptimizer(learning_rate=0.0001, beta1=0.9, beta2=0.999,
                     epsilon=1e-08, use_locking=False).minimize(cost, var_list=train_params)

-# Initialize all variables
-sess.run(tf.initialize_all_variables())
+# Initialize all variables in the session
+tl.layers.initialize_global_variables(sess)

 # Print network information
 network.print_params()
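
The change above tracks TensorFlow 0.12, where tf.initialize_all_variables() is deprecated in favour of tf.global_variables_initializer(). A minimal sketch of what a version-compatible helper like tl.layers.initialize_global_variables could look like (an illustration only, not necessarily the library's actual implementation):

    import tensorflow as tf

    def initialize_global_variables(sess):
        """Run whichever variable initializer the installed TF version provides."""
        try:
            # TensorFlow 0.12 and later
            sess.run(tf.global_variables_initializer())
        except AttributeError:
            # older TensorFlow releases
            sess.run(tf.initialize_all_variables())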

docs/modules/layers.rst

Lines changed: 9 additions & 3 deletions
@@ -20,7 +20,7 @@ All TensorLayer layers have a number of properties in common:

 All TensorLayer layers have a number of methods in common:

-- ``layer.print_params()`` : print the network variables information in order (after ``sess.run(tf.initialize_all_variables())``). alternatively, print all variables by ``tl.layers.print_all_variables()``.
+- ``layer.print_params()`` : print the network variables information in order (after ``tl.layers.initialize_global_variables(sess)``). alternatively, print all variables by ``tl.layers.print_all_variables()``.
 - ``layer.print_layers()`` : print the network layers information in order.
 - ``layer.count_params()`` : print the number of parameters in the network.

@@ -71,7 +71,7 @@ To count the number of parameters in a network, run ``network.count_params()``.
   train_op = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999,
                       epsilon=1e-08, use_locking=False).minimize(cost, var_list = train_params)

-  sess.run(tf.initialize_all_variables())
+  tl.layers.initialize_global_variables(sess)

   network.print_params()
   network.print_layers()
@@ -260,6 +260,7 @@ Layer list
   get_variables_with_name
   set_name_reuse
   print_all_variables
+  initialize_global_variables

   Layer

@@ -313,7 +314,7 @@ Layer list
   MultiplexerLayer

   EmbeddingAttentionSeq2seqWrapper
-
+
   flatten_reshape
   clear_layers_name
   initialize_rnn_state
@@ -338,10 +339,15 @@ Print variables
 ^^^^^^^^^^^^^^^^^^
 .. autofunction:: print_all_variables

+Initialize variables
+^^^^^^^^^^^^^^^^^^^^^^
+.. autofunction:: initialize_global_variables
+
 Basic layer
 -----------
 .. autoclass:: Layer

+
 Input layer
 ------------
 .. autoclass:: InputLayer
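
The new docs entry sits alongside the existing inspection helpers. A short usage sketch of the documented calls, assuming a small illustrative network (the placeholder shape and layer names below are made up for the example):

    import tensorflow as tf
    import tensorlayer as tl

    x = tf.placeholder(tf.float32, shape=[None, 784])
    network = tl.layers.InputLayer(x, name='input_layer')
    network = tl.layers.DenseLayer(network, n_units=800, act=tf.nn.relu, name='relu1')

    sess = tf.InteractiveSession()
    tl.layers.initialize_global_variables(sess)   # must run before print_params()

    network.print_params()              # network variables, in order
    network.print_layers()              # layer outputs, in order
    network.count_params()              # number of parameters in the network
    tl.layers.print_all_variables()     # alternatively, every variable in the graph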

docs/user/tutorial.rst

Lines changed: 4 additions & 3 deletions
@@ -88,8 +88,8 @@ TensorFlow's methods like ``sess.run()``, see ``tutorial_mnist.py`` for more det
   train_op = tf.train.AdamOptimizer(learning_rate=0.0001, beta1=0.9, beta2=0.999,
                       epsilon=1e-08, use_locking=False).minimize(cost, var_list=train_params)

-  # initialize all variables
-  sess.run(tf.initialize_all_variables())
+  # initialize all variables in the session
+  tl.layers.initialize_global_variables(sess)

   # print network information
   network.print_params()
@@ -432,6 +432,7 @@ max-pooling of factor 2 in both dimensions. And then apply a ``Conv2dLayer`` wit
 the 4D output to 1D vector by using ``FlattenLayer``, and apply a dropout with 50%
 to last hidden layer. The ``?`` represents arbitrary batch_size.

+Note, ``tutorial_mnist.py`` introduces the simplified CNN API for beginner.

 .. code-block:: python
@@ -949,7 +950,7 @@ directories as follow.
                     embedding_size = embedding_size,
                     name ='embedding_layer')

-  sess.run(tf.initialize_all_variables())
+  tl.layers.initialize_global_variables(sess)

   tl.files.assign_params(sess, [load_params[0]], emb_net)

example/tutorial_atari_pong.py

Lines changed: 3 additions & 2 deletions
@@ -76,8 +76,9 @@ def prepro(I):
 train_op = tf.train.RMSPropOptimizer(learning_rate, decay_rate).minimize(loss)

 with tf.Session() as sess:
-    init = tf.initialize_all_variables()
-    sess.run(init)
+    # init = tf.initialize_all_variables()
+    # sess.run(init)
+    tl.layers.initialize_global_variables(sess)
     if resume:
         load_params = tl.files.load_npz(name=model_file_name+'.npz')
         tl.files.assign_params(sess, load_params, network)
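
A condensed sketch of the resume flow shown in this hunk: initialize the fresh variables first, then overwrite them with the saved parameters, so the loaded weights are not clobbered by the initializer (model_file_name, resume and network are assumed to be defined earlier in the script, as in the tutorial):

    with tf.Session() as sess:
        tl.layers.initialize_global_variables(sess)        # fresh variables first
        if resume:
            load_params = tl.files.load_npz(name=model_file_name + '.npz')
            tl.files.assign_params(sess, load_params, network)   # then restore weights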

example/tutorial_cifar10_tfrecord.py

Lines changed: 64 additions & 91 deletions
@@ -182,54 +182,43 @@ def read_and_decode(filename, is_train=None):
                                     num_threads=32)

 def inference(x_crop, y_, reuse):
+    """
+    For simplified CNN API, check tensorlayer.org
+    """
+    W_init = tf.truncated_normal_initializer(stddev=5e-2)
+    b_init = tf.constant_initializer(value=0.0)
+    W_init2 = tf.truncated_normal_initializer(stddev=0.04)
+    b_init2 = tf.constant_initializer(value=0.1)
     with tf.variable_scope("model", reuse=reuse):
         tl.layers.set_name_reuse(reuse)
         network = tl.layers.InputLayer(x_crop, name='input_layer')
-        network = tl.layers.Conv2dLayer(network,
-                        act = tf.nn.relu,
-                        shape = [5, 5, 3, 64],  # 64 features for each 5x5x3 patch
-                        strides=[1, 1, 1, 1],
-                        padding='SAME',
-                        W_init=tf.truncated_normal_initializer(stddev=5e-2),
-                        b_init=tf.constant_initializer(value=0.0),
-                        name ='cnn_layer1')     # output: (batch_size, 24, 24, 64)
-        network = tl.layers.PoolLayer(network,
-                        ksize=[1, 3, 3, 1],
-                        strides=[1, 2, 2, 1],
-                        padding='SAME',
-                        pool = tf.nn.max_pool,
-                        name ='pool_layer1',)   # output: (batch_size, 12, 12, 64)
+        network = tl.layers.Conv2dLayer(network, act=tf.nn.relu,
+                    shape=[5, 5, 3, 64], strides=[1, 1, 1, 1], padding='SAME',  # 64 features for each 5x5x3 patch
+                    W_init=W_init, b_init=b_init, name ='cnn_layer1')           # output: (batch_size, 24, 24, 64)
+        network = tl.layers.PoolLayer(network, ksize=[1, 3, 3, 1],
+                    strides=[1, 2, 2, 1], padding='SAME',
+                    pool = tf.nn.max_pool, name ='pool_layer1',)                # output: (batch_size, 12, 12, 64)
+        # you can also use tl.layers.LocalResponseNormLayer
         network.outputs = tf.nn.lrn(network.outputs, 4, bias=1.0, alpha=0.001 / 9.0,
                                     beta=0.75, name='norm1')
-        network = tl.layers.Conv2dLayer(network,
-                        act = tf.nn.relu,
-                        shape = [5, 5, 64, 64], # 64 features for each 5x5 patch
-                        strides=[1, 1, 1, 1],
-                        padding='SAME',
-                        W_init=tf.truncated_normal_initializer(stddev=5e-2),
-                        b_init=tf.constant_initializer(value=0.1),
-                        name ='cnn_layer2')     # output: (batch_size, 12, 12, 64)
+
+        network = tl.layers.Conv2dLayer(network, act=tf.nn.relu,
+                    shape=[5, 5, 64, 64], strides=[1, 1, 1, 1], padding='SAME', # 64 features for each 5x5 patch
+                    W_init=W_init, b_init=b_init, name ='cnn_layer2')           # output: (batch_size, 12, 12, 64)
         network.outputs = tf.nn.lrn(network.outputs, 4, bias=1.0, alpha=0.001 / 9.0,
                                     beta=0.75, name='norm2')
-        network = tl.layers.PoolLayer(network,
-                        ksize=[1, 3, 3, 1],
-                        strides=[1, 2, 2, 1],
-                        padding='SAME',
-                        pool = tf.nn.max_pool,
-                        name ='pool_layer2')    # output: (batch_size, 6, 6, 64)
-        network = tl.layers.FlattenLayer(network, name='flatten_layer')    # output: (batch_size, 2304)
-        network = tl.layers.DenseLayer(network, n_units=384, act = tf.nn.relu,
-                        W_init=tf.truncated_normal_initializer(stddev=0.04),
-                        b_init=tf.constant_initializer(value=0.1),
-                        name='relu1')           # output: (batch_size, 384)
-        network = tl.layers.DenseLayer(network, n_units=192, act = tf.nn.relu,
-                        W_init=tf.truncated_normal_initializer(stddev=0.04),
-                        b_init=tf.constant_initializer(value=0.1),
-                        name='relu2')           # output: (batch_size, 192)
-        network = tl.layers.DenseLayer(network, n_units=10, act = tf.identity,
-                        W_init=tf.truncated_normal_initializer(stddev=1/192.0),
-                        b_init = tf.constant_initializer(value=0.0),
-                        name='output_layer')    # output: (batch_size, 10)
+        network = tl.layers.PoolLayer(network, ksize=[1, 3, 3, 1],
+                    strides=[1, 2, 2, 1], padding='SAME',
+                    pool = tf.nn.max_pool, name ='pool_layer2')                 # output: (batch_size, 6, 6, 64)
+        network = tl.layers.FlattenLayer(network, name='flatten_layer')         # output: (batch_size, 2304)
+        network = tl.layers.DenseLayer(network, n_units=384, act=tf.nn.relu,
+                    W_init=W_init2, b_init=b_init2, name='relu1')               # output: (batch_size, 384)
+        network = tl.layers.DenseLayer(network, n_units=192, act=tf.nn.relu,
+                    W_init=W_init2, b_init=b_init2, name='relu2')               # output: (batch_size, 192)
+        network = tl.layers.DenseLayer(network, n_units=10, act=tf.identity,
+                    W_init=tf.truncated_normal_initializer(stddev=1/192.0),
+                    b_init = tf.constant_initializer(value=0.0),
+                    name='output_layer')                                        # output: (batch_size, 10)
         y = network.outputs

         ce = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(y, y_))
@@ -248,59 +237,44 @@ def inference_batch_norm(x_crop, y_, reuse, is_train):
     """
     For batch normalization, the normalization should be placed after cnn
     with linear activation.
+
+    For simplified CNN API, check tensorlayer.org
     """
+    W_init = tf.truncated_normal_initializer(stddev=5e-2)
+    W_init2 = tf.truncated_normal_initializer(stddev=0.04)
+    b_init2 = tf.constant_initializer(value=0.1)
     with tf.variable_scope("model", reuse=reuse):
         tl.layers.set_name_reuse(reuse)
         network = tl.layers.InputLayer(x_crop, name='input_layer')
-        network = tl.layers.Conv2dLayer(network,
-                        act = tf.identity,
-                        shape = [5, 5, 3, 64],  # 64 features for each 5x5x3 patch
-                        strides=[1, 1, 1, 1],
-                        padding='SAME',
-                        W_init=tf.truncated_normal_initializer(stddev=5e-2),
-                        # b_init=tf.constant_initializer(value=0.0),
-                        b_init=None,
-                        name ='cnn_layer1')     # output: (batch_size, 24, 24, 64)
-        network = tl.layers.BatchNormLayer(network, is_train=is_train, name='batch_norm1')
-        network.outputs = tf.nn.relu(network.outputs, name='relu1')
-        network = tl.layers.PoolLayer(network,
-                        ksize=[1, 3, 3, 1],
-                        strides=[1, 2, 2, 1],
-                        padding='SAME',
-                        pool = tf.nn.max_pool,
-                        name ='pool_layer1',)   # output: (batch_size, 12, 12, 64)
-
-        network = tl.layers.Conv2dLayer(network,
-                        act = tf.identity,
-                        shape = [5, 5, 64, 64], # 64 features for each 5x5 patch
-                        strides=[1, 1, 1, 1],
-                        padding='SAME',
-                        W_init=tf.truncated_normal_initializer(stddev=5e-2),
-                        # b_init=tf.constant_initializer(value=0.1),
-                        b_init=None,
-                        name ='cnn_layer2')     # output: (batch_size, 12, 12, 64)
-
-        network = tl.layers.BatchNormLayer(network, is_train=is_train, name='batch_norm2')
-        network.outputs = tf.nn.relu(network.outputs, name='relu2')
-        network = tl.layers.PoolLayer(network,
-                        ksize=[1, 3, 3, 1],
-                        strides=[1, 2, 2, 1],
-                        padding='SAME',
-                        pool = tf.nn.max_pool,
-                        name ='pool_layer2')    # output: (batch_size, 6, 6, 64)
-        network = tl.layers.FlattenLayer(network, name='flatten_layer')    # output: (batch_size, 2304)
-        network = tl.layers.DenseLayer(network, n_units=384, act = tf.nn.relu,
-                        W_init=tf.truncated_normal_initializer(stddev=0.04),
-                        b_init=tf.constant_initializer(value=0.1),
-                        name='relu1')           # output: (batch_size, 384)
+        network = tl.layers.Conv2dLayer(network, act=tf.identity,
+                    shape=[5, 5, 3, 64], strides=[1, 1, 1, 1], padding='SAME',  # 64 features for each 5x5x3 patch
+                    W_init=W_init, b_init=None, name='cnn_layer1')              # output: (batch_size, 24, 24, 64)
+        network = tl.layers.BatchNormLayer(network, is_train=is_train,
+                    act=tf.nn.relu, name='batch_norm1')
+
+        network = tl.layers.PoolLayer(network, ksize=[1, 3, 3, 1],
+                    strides=[1, 2, 2, 1], padding='SAME',
+                    pool=tf.nn.max_pool, name='pool_layer1',)                   # output: (batch_size, 12, 12, 64)
+
+        network = tl.layers.Conv2dLayer(network, act=tf.identity,
+                    shape=[5, 5, 64, 64], strides=[1, 1, 1, 1], padding='SAME', # 64 features for each 5x5 patch
+                    W_init=W_init, b_init=None, name ='cnn_layer2')             # output: (batch_size, 12, 12, 64)
+
+        network = tl.layers.BatchNormLayer(network, is_train=is_train,
+                    act=tf.nn.relu, name='batch_norm2')
+
+        network = tl.layers.PoolLayer(network, ksize=[1, 3, 3, 1],
+                    strides=[1, 2, 2, 1], padding='SAME',
+                    pool = tf.nn.max_pool, name ='pool_layer2')                 # output: (batch_size, 6, 6, 64)
+        network = tl.layers.FlattenLayer(network, name='flatten_layer')         # output: (batch_size, 2304)
+        network = tl.layers.DenseLayer(network, n_units=384, act=tf.nn.relu,
+                    W_init=W_init2, b_init=b_init2, name='relu1')               # output: (batch_size, 384)
         network = tl.layers.DenseLayer(network, n_units=192, act = tf.nn.relu,
-                        W_init=tf.truncated_normal_initializer(stddev=0.04),
-                        b_init=tf.constant_initializer(value=0.1),
-                        name='relu2')           # output: (batch_size, 192)
+                    W_init=W_init2, b_init=b_init2, name='relu2')               # output: (batch_size, 192)
         network = tl.layers.DenseLayer(network, n_units=10, act = tf.identity,
-                        W_init=tf.truncated_normal_initializer(stddev=1/192.0),
-                        b_init = tf.constant_initializer(value=0.0),
-                        name='output_layer')    # output: (batch_size, 10)
+                    W_init=tf.truncated_normal_initializer(stddev=1/192.0),
+                    b_init = tf.constant_initializer(value=0.0),
+                    name='output_layer')                                        # output: (batch_size, 10)
         y = network.outputs

         ce = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(y, y_))
@@ -321,7 +295,7 @@ def inference_batch_norm(x_crop, y_, reuse, is_train):
     # y_ = tf.placeholder(tf.int32, shape=[batch_size,])
     # cost, acc, network = inference(x_crop, y_, None)

-    with tf.device('/gpu:0'):
+    with tf.device('/gpu:0'): # <-- remove it if you don't have GPU
         # network in gpu
         cost, acc, network = inference(x_train_batch, y_train_batch, None)
         cost_test, acc_test, _ = inference(x_test_batch, y_test_batch, True)
@@ -336,7 +310,7 @@ def inference_batch_norm(x_crop, y_, reuse, is_train):
     n_step_epoch = int(len(y_train)/batch_size)
     n_step = n_epoch * n_step_epoch

-    with tf.device('/gpu:0'):
+    with tf.device('/gpu:0'): # <-- remove it if you don't have GPU
         # train in gpu
         train_params = network.all_params
         train_op = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999,
@@ -357,7 +331,6 @@ def inference_batch_norm(x_crop, y_, reuse, is_train):

     coord = tf.train.Coordinator()
     threads = tf.train.start_queue_runners(sess=sess, coord=coord)
-    # for step in range(n_step):
     step = 0
     for epoch in range(n_epoch):
         start_time = time.time()
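
The rewritten inference_batch_norm follows the rule stated in its docstring: the convolution stays linear and bias-free, and BatchNormLayer supplies the shift and the nonlinearity. A minimal sketch isolating that conv -> batch-norm -> ReLU pattern (x and is_train are assumed inputs; the layer names here are illustrative):

    W_init = tf.truncated_normal_initializer(stddev=5e-2)

    net = tl.layers.InputLayer(x, name='in')
    net = tl.layers.Conv2dLayer(net, act=tf.identity,          # linear convolution ...
                shape=[5, 5, 3, 64], strides=[1, 1, 1, 1], padding='SAME',
                W_init=W_init, b_init=None, name='conv1')      # ... without a bias term
    net = tl.layers.BatchNormLayer(net, is_train=is_train,
                act=tf.nn.relu, name='bn1')                    # BN adds the shift and the ReLU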

example/tutorial_generate_text.py

Lines changed: 2 additions & 1 deletion
@@ -154,7 +154,8 @@ def main_restore_embedding_layer():
                     embedding_size = embedding_size,
                     name ='embedding_layer')

-    sess.run(tf.initialize_all_variables())
+    # sess.run(tf.initialize_all_variables())
+    tl.layers.initialize_global_variables(sess)

     tl.files.assign_params(sess, [load_params[0]], emb_net)

example/tutorial_inceptionV3_tfslim.py

Lines changed: 14 additions & 11 deletions
@@ -64,7 +64,7 @@ def print_prob(prob):
     return top1


-## Alexnet_v2 / All Slim nets can be merged into TensorLayer
+## Alexnet_v2 / All TF-Slim nets can be merged into TensorLayer
 # x = tf.placeholder(tf.float32, shape=[None, 299, 299, 3])
 # net_in = tl.layers.InputLayer(x, name='input_layer')
 # network = tl.layers.SlimNetsLayer(layer=net_in, slim_layer=alexnet_v2,
@@ -74,14 +74,16 @@ def print_prob(prob):
 #                     'dropout_keep_prob' : 0.5,
 #                     'spatial_squeeze' : True,
 #                     'scope' : 'alexnet_v2'
-#                     }
+#                     },
+#                 name='alexnet_v2'  # <-- the name should be the same with the ckpt model
 #                 )
 # sess = tf.InteractiveSession()
-# sess.run(tf.initialize_all_variables())
+# # sess.run(tf.initialize_all_variables())
+# tl.layers.initialize_global_variables(sess)
 # network.print_params()
-# exit()

-# InceptionV3
+
+## InceptionV3 / All TF-Slim nets can be merged into TensorLayer
 x = tf.placeholder(tf.float32, shape=[None, 299, 299, 3])
 net_in = tl.layers.InputLayer(x, name='input_layer')
 with slim.arg_scope(inception_v3_arg_scope()):
@@ -92,7 +94,7 @@ def print_prob(prob):
         slim_args= {
                 'num_classes' : 1001,
                 'is_training' : False,
-                # 'dropout_keep_prob' : 0.8, # for training
+                # 'dropout_keep_prob' : 0.8,    # for training
                 # 'min_depth' : 16,
                 # 'depth_multiplier' : 1.0,
                 # 'prediction_fn' : slim.softmax,
@@ -102,16 +104,17 @@ def print_prob(prob):
             },
         name='InceptionV3'  # <-- the name should be the same with the ckpt model
         )
-saver = tf.train.Saver()

 sess = tf.InteractiveSession()
-sess.run(tf.initialize_all_variables())

-# with tf.Session() as sess:
-saver.restore(sess, "inception_v3.ckpt")  # download from https://github.com/tensorflow/models/tree/master/slim#Install
-print("Model Restored")
 network.print_params(False)

+saver = tf.train.Saver()
+try:    # TF12
+    saver.restore(sess, "./inception_v3.ckpt")  # download from https://github.com/tensorflow/models/tree/master/slim#Install
+except: # TF11
+    saver.restore(sess, "inception_v3.ckpt")
+print("Model Restored")

 from scipy.misc import imread, imresize
 y = network.outputs
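
A condensed sketch of the restore step added above, assuming the inception_v3.ckpt checkpoint has been downloaded next to the script and sess, network are already set up as in the tutorial (the commit itself only labels the two path forms "TF12" and "TF11"):

    saver = tf.train.Saver()
    try:    # TF12: restore with the explicit relative path
        saver.restore(sess, "./inception_v3.ckpt")
    except: # TF11: fall back to the bare file name
        saver.restore(sess, "inception_v3.ckpt")
    print("Model Restored")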
