Commit 1ab2f1d
Author: Jonathan DEKHTIAR

Tutorial fixing (#635)

* TF bug fixing in Tutorials
* Error fix in #476
* Issue with Flags in Tutorials Fixed
* Missing import fixed
* Changelog Update
* VGG19 import error fix
* Error fixing in VGG tutorials
* TFRecord Shape Error Fix
* Sess Initialization Error Fix
* Squeezenet model loading from "models" dir
* PTB tutorials import issue fixed
* mobilenet load from dir "models"
* YAPF error fix
* Missing Import fixed
* Various Fixes on Tutorials
* YAPF error correct
* Update CHANGELOG.md
* update VGG16 tutorial, auto download model
* Python 3 Unicode Encoding Error
* Deprecation Warning Fix

1 parent d274148 · commit 1ab2f1d

15 files changed (+99, -56 lines)

CHANGELOG.md
Lines changed: 4 additions & 1 deletion

@@ -125,7 +125,10 @@ To release a new version, please update the changelog as followed:
 - Error in `tl.layers.TernaryConv2d` fixed - self.inputs not defined (by @DEKHTIARJonathan in #658)
 - Deprecation warning fixed in `tl.layers.binary._compute_threshold()` (by @DEKHTIARJonathan in #658)
 - All references to `tf.logging` replaced by `tl.logging` (by @DEKHTIARJonathan in #661)
-
+- Tutorial:
+  - `tutorial_word2vec_basic.py` saving issue #476 fixed (by @DEKHTIARJonathan in #635)
+  - All tutorials tested and errors have been fixed (by @DEKHTIARJonathan in #635)
+
 ### Security
 
 ### Dependencies Update

example/tutorial_cifar10_tfrecord.py
Lines changed: 1 addition & 1 deletion

@@ -150,7 +150,7 @@ def read_and_decode(filename, is_train=None):
 # print("img_batch : %s" % img_batch._shape)
 # print("label_batch : %s" % label_batch._shape)
 #
-# init = tf.initialize_all_variables()
+# init = tf.global_variables_initializer()
 # with tf.Session() as sess:
 # sess.run(init)
 # coord = tf.train.Coordinator()
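
This change (repeated in several tutorials below) swaps the long-deprecated `tf.initialize_all_variables()` for `tf.global_variables_initializer()`; both return an op that initializes every global variable, only the name changed in the TF 1.x API. A minimal TF1-style sketch of the replacement, with an illustrative variable:

    import tensorflow as tf

    w = tf.Variable(tf.zeros([2, 2]), name="w")

    init = tf.global_variables_initializer()  # replaces tf.initialize_all_variables()
    with tf.Session() as sess:
        sess.run(init)       # variables now hold their initial values
        print(sess.run(w))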

example/tutorial_frozenlake_dqn.py
Lines changed: 3 additions & 0 deletions

@@ -26,7 +26,10 @@
 
 """
 
+import time
 import gym
+import numpy as np
+
 import tensorflow as tf
 import tensorlayer as tl
 from tensorlayer.layers import *

example/tutorial_generate_text.py
Lines changed: 2 additions & 4 deletions

@@ -120,7 +120,7 @@ def customized_clean_str(string):
 
 
 def customized_read_words(input_fpath): #, dictionary):
-    with open(input_fpath, "r") as f:
+    with open(input_fpath, "r", encoding="utf8") as f:
         words = f.read()
     # Clean the data
     words = customized_clean_str(words)

@@ -155,7 +155,7 @@ def main_restore_embedding_layer():
 
     emb_net = tl.layers.EmbeddingInputlayer(x, vocabulary_size, embedding_size, name='emb')
 
-    # sess.run(tf.initialize_all_variables())
+    # sess.run(tf.global_variables_initializer())
     tl.layers.initialize_global_variables(sess)
 
     tl.files.assign_params(sess, [load_params[0]], emb_net)

@@ -369,5 +369,3 @@ def loss_fn(outputs, targets, batch_size, sequence_length):
 
     # How to generate text from a given context
     main_lstm_generate_text()
-
-    #
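
The added `encoding="utf8"` is the "Python 3 Unicode Encoding Error" fix from the commit message: without it, `open()` falls back to the platform default codec (for example cp1252 on Windows) and reading a corpus with non-ASCII characters raises `UnicodeDecodeError`. A minimal sketch of the pattern, with a hypothetical file path:

    # Hypothetical corpus path; the tutorial reads its own data file the same way.
    fpath = "data/corpus.txt"

    # Forcing UTF-8 makes the read independent of the OS default encoding.
    with open(fpath, "r", encoding="utf8") as f:
        words = f.read()

    print(len(words), "characters read")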

example/tutorial_inceptionV3_tfslim.py
Lines changed: 8 additions & 6 deletions

@@ -38,12 +38,14 @@
 
 slim = tf.contrib.slim
 try:
-    from data.imagenet_classes import *
+    from tensorlayer.models.imagenet_classes import *
 except Exception as e:
     raise Exception(
         "{} / download the file from: https://github.com/zsdonghao/tensorlayer/tree/master/example/data".format(e)
     )
 
+MODEL_PATH = os.path.join("models", 'inception_v3.ckpt')
+
 
 def load_image(path):
     # load image

@@ -58,7 +60,7 @@ def load_image(path):
     xx = int((img.shape[1] - short_edge) / 2)
     crop_img = img[yy:yy + short_edge, xx:xx + short_edge]
     # resize to 299, 299
-    resized_img = skimage.transform.resize(crop_img, (299, 299))
+    resized_img = skimage.transform.resize(crop_img, (299, 299), anti_aliasing=False)
     return resized_img
 
 

@@ -89,7 +91,7 @@ def print_prob(prob):
 # name='alexnet_v2' # <-- the name should be the same with the ckpt model
 # )
 # sess = tf.InteractiveSession()
-# # sess.run(tf.initialize_all_variables())
+# # sess.run(tf.global_variables_initializer())
 # tl.layers.initialize_global_variables(sess)
 # network.print_params()
 

@@ -122,15 +124,15 @@ def print_prob(prob):
 network.print_params(False)
 
 saver = tf.train.Saver()
-if not os.path.isfile("inception_v3.ckpt"):
+if not os.path.isfile(MODEL_PATH):
     raise Exception(
         "Please download inception_v3 ckpt from https://github.com/tensorflow/models/tree/master/research/slim"
     )
 
 try: # TF12+
-    saver.restore(sess, "./inception_v3.ckpt")
+    saver.restore(sess, MODEL_PATH)
 except Exception: # TF11
-    saver.restore(sess, MODEL_PATH)
+    saver.restore(sess, MODEL_PATH)
 print("Model Restored")
 
 y = network.outputs
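
The recurring change in this commit is that pre-trained weights now live under a local models/ directory, with the path built once via `os.path.join` instead of being hard-coded in several places. A minimal, self-contained TF1 sketch of the save/restore flow that `saver.restore(sess, MODEL_PATH)` relies on (the variable and checkpoint name here are illustrative, not the tutorial's InceptionV3 graph):

    import os
    import tensorflow as tf

    MODEL_PATH = os.path.join("models", "toy_model.ckpt")  # hypothetical checkpoint prefix

    w = tf.get_variable("w", initializer=tf.zeros([2]))

    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        os.makedirs("models", exist_ok=True)
        saver.save(sess, MODEL_PATH)     # writes the checkpoint files under models/
        saver.restore(sess, MODEL_PATH)  # same call the tutorial makes with its MODEL_PATH
        print("Model Restored from", MODEL_PATH)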

example/tutorial_mobilenet.py
Lines changed: 5 additions & 3 deletions

@@ -17,6 +17,8 @@
     BatchNormLayer, Conv2d, DepthwiseConv2d, FlattenLayer, GlobalMeanPool2d, InputLayer, ReshapeLayer
 )
 
+MODEL_PATH = os.path.join("models", "mobilenet.npz")
+
 
 def conv_block(n, n_filter, filter_size=(3, 3), strides=(1, 1), is_train=False, name='conv_block'):
     # ref: https://github.com/keras-team/keras/blob/master/keras/applications/mobilenet.py

@@ -101,10 +103,10 @@ def mobilenet(x, is_train=True, reuse=False):
 sess = tf.InteractiveSession()
 # tl.layers.initialize_global_variables(sess)
 
-if not os.path.isfile("mobilenet.npz"):
+if not os.path.isfile(MODEL_PATH):
     raise Exception("Please download mobilenet.npz from : https://github.com/tensorlayer/pretrained-models")
 
-tl.files.load_and_assign_npz(sess=sess, name='mobilenet.npz', network=n)
+tl.files.load_and_assign_npz(sess=sess, name=MODEL_PATH, network=n)
 
 img = tl.vis.read_image('data/tiger.jpeg')
 img = tl.prepro.imresize(img, (224, 224)) / 255

@@ -114,4 +116,4 @@ def mobilenet(x, is_train=True, reuse=False):
 
 print(" End time : %.5ss" % (time.time() - start_time))
 print('Predicted :', decode_predictions([prob], top=3)[0])
-# tl.files.save_npz(n.all_params, name='mobilenet.npz', sess=sess)
+# tl.files.save_npz(n.all_params, name=MODEL_PATH, sess=sess)
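
`tl.files.load_and_assign_npz` restores a TensorLayer `.npz` weight file into an already-built network, and it now reads from the shared MODEL_PATH under models/. A small sketch of the save/load round trip on a toy network (layer names and sizes are made up for illustration, not the MobileNet graph):

    import os
    import tensorflow as tf
    import tensorlayer as tl

    MODEL_PATH = os.path.join("models", "toy.npz")  # hypothetical weight file

    x = tf.placeholder(tf.float32, [None, 8])
    net = tl.layers.InputLayer(x, name="input")
    net = tl.layers.DenseLayer(net, n_units=4, name="dense")

    sess = tf.InteractiveSession()
    tl.layers.initialize_global_variables(sess)

    os.makedirs("models", exist_ok=True)
    tl.files.save_npz(net.all_params, name=MODEL_PATH, sess=sess)          # write weights
    tl.files.load_and_assign_npz(sess=sess, name=MODEL_PATH, network=net)  # read them back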

example/tutorial_ptb_lstm.py
Lines changed: 13 additions & 3 deletions

@@ -100,17 +100,27 @@
 
 """
 
+import sys
 import time
 
 import numpy as np
 import tensorflow as tf
 
 import tensorlayer as tl
 
-flags = tf.flags
+flags = tf.app.flags
+
 flags.DEFINE_string("model", "small", "A type of model. Possible options are: small, medium, large.")
+
+if (tf.VERSION >= '1.5'):
+    # parse flags
+    flags.FLAGS(sys.argv, known_only=True)
+    flags.ArgumentParser()
+
 FLAGS = flags.FLAGS
 
+tf.logging.set_verbosity(tf.logging.DEBUG)
+
 
 def main(_):
     """

@@ -235,7 +245,7 @@ def inference(x, is_training, num_steps, reuse=None):
     # Inference for Testing (Evaluation)
     net_test, lstm1_test, lstm2_test = inference(input_data_test, is_training=False, num_steps=1, reuse=True)
 
-    # sess.run(tf.initialize_all_variables())
+    # sess.run(tf.global_variables_initializer())
     tl.layers.initialize_global_variables(sess)
 
     def loss_fn(outputs, targets): #, batch_size, num_steps):

@@ -269,7 +279,7 @@ def loss_fn(outputs, targets): #, batch_size, num_steps):
     optimizer = tf.train.GradientDescentOptimizer(lr)
     train_op = optimizer.apply_gradients(zip(grads, tvars))
 
-    # sess.run(tf.initialize_all_variables())
+    # sess.run(tf.global_variables_initializer())
     tl.layers.initialize_global_variables(sess)
 
     net.print_params()
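
The flags change is the "Issue with Flags in Tutorials" fix: from TF 1.5 onward `tf.app.flags` wraps absl flags, which are not parsed implicitly on first access, so the script parses them explicitly with `flags.FLAGS(sys.argv, known_only=True)` (unrecognized argv entries are left alone). A minimal sketch of the pattern; the `--model` flag mirrors the tutorial, everything else is illustrative:

    import sys
    import tensorflow as tf

    flags = tf.app.flags
    flags.DEFINE_string("model", "small", "A type of model. Possible options are: small, medium, large.")

    if tf.VERSION >= '1.5':
        # Parse known flags eagerly instead of relying on lazy parsing.
        flags.FLAGS(sys.argv, known_only=True)

    FLAGS = flags.FLAGS
    print("model =", FLAGS.model)  # e.g. `python tutorial.py --model medium` prints "medium"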

example/tutorial_ptb_lstm_state_is_tuple.py
Lines changed: 13 additions & 3 deletions

@@ -101,17 +101,27 @@
 
 """
 
+import sys
 import time
 
 import numpy as np
 import tensorflow as tf
 
 import tensorlayer as tl
 
-flags = tf.flags
+flags = tf.app.flags
+
 flags.DEFINE_string("model", "small", "A type of model. Possible options are: small, medium, large.")
+
+if (tf.VERSION >= '1.5'):
+    # parse flags
+    flags.FLAGS(sys.argv, known_only=True)
+    flags.ArgumentParser()
+
 FLAGS = flags.FLAGS
 
+tf.logging.set_verbosity(tf.logging.DEBUG)
+
 
 def main(_):
     """

@@ -241,7 +251,7 @@ def inference(x, is_training, num_steps, reuse=None):
     # Inference for Testing (Evaluation)
     net_test, lstm1_test, lstm2_test = inference(input_data_test, is_training=False, num_steps=1, reuse=True)
 
-    # sess.run(tf.initialize_all_variables())
+    # sess.run(tf.global_variables_initializer())
     tl.layers.initialize_global_variables(sess)
 
     def loss_fn(outputs, targets, batch_size):

@@ -275,7 +285,7 @@ def loss_fn(outputs, targets, batch_size):
     optimizer = tf.train.GradientDescentOptimizer(lr)
     train_op = optimizer.apply_gradients(zip(grads, tvars))
 
-    # sess.run(tf.initialize_all_variables())
+    # sess.run(tf.global_variables_initializer())
     tl.layers.initialize_global_variables(sess)
 
     net.print_params()

example/tutorial_squeezenet.py
Lines changed: 5 additions & 3 deletions

@@ -11,6 +11,8 @@
 import tensorlayer as tl
 from tensorlayer.layers import (ConcatLayer, Conv2d, DropoutLayer, GlobalMeanPool2d, InputLayer, MaxPool2d)
 
+MODEL_PATH = os.path.join("models", "squeezenet.npz")
+
 
 def decode_predictions(preds, top=5): # keras.applications.resnet50
     fpath = os.path.join("data", "imagenet_class_index.json")

@@ -114,8 +116,8 @@ def squeezenet(x, is_train=True, reuse=False):
 sess = tf.InteractiveSession()
 tl.layers.initialize_global_variables(sess)
 
-if tl.files.file_exists('squeezenet.npz'):
-    tl.files.load_and_assign_npz(sess=sess, name='squeezenet.npz', network=n)
+if tl.files.file_exists(MODEL_PATH):
+    tl.files.load_and_assign_npz(sess=sess, name=MODEL_PATH, network=n)
 else:
     raise Exception(
         "please download the pre-trained squeezenet.npz from https://github.com/tensorlayer/pretrained-models"

@@ -129,4 +131,4 @@ def squeezenet(x, is_train=True, reuse=False):
 print(" End time : %.5ss" % (time.time() - start_time))
 
 print('Predicted:', decode_predictions([prob], top=3)[0])
-tl.files.save_npz(n.all_params, name='squeezenet.npz', sess=sess)
+tl.files.save_npz(n.all_params, name=MODEL_PATH, sess=sess)

example/tutorial_tfrecord.py
Lines changed: 3 additions & 5 deletions

@@ -98,9 +98,9 @@ def read_and_decode(filename):
 img_batch, label_batch = tf.train.shuffle_batch(
     [img, label], batch_size=4, capacity=2000, min_after_dequeue=1000, num_threads=16
 )
-print("img_batch : %s" % img_batch._shape)
-print("label_batch : %s" % label_batch._shape)
-# init = tf.initialize_all_variables()
+print("img_batch : %s" % img_batch.shape)
+print("label_batch : %s" % label_batch.shape)
+# init = tf.global_variables_initializer()
 with tf.Session() as sess:
     # sess.run(init)
     tl.layers.initialize_global_variables(sess)

@@ -116,5 +116,3 @@ def read_and_decode(filename):
     coord.request_stop()
     coord.join(threads)
     sess.close()
-
-#
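
`_shape` is a private attribute of `tf.Tensor`; the public `Tensor.shape` property (or `get_shape()`) exposes the same static shape without depending on internals, which is what the "TFRecord Shape Error Fix" switches to. A small sketch, assuming nothing beyond core TF1:

    import tensorflow as tf

    img_batch = tf.zeros([4, 32, 32, 3])  # stand-in for the shuffle_batch output

    print("img_batch : %s" % img_batch.shape)        # (4, 32, 32, 3)
    print("img_batch : %s" % img_batch.get_shape())  # equivalent public accessor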
