
Commit 8137691

fix useless string statement issues.

1 parent 95d4d93 · commit 8137691
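
The "useless string statements" removed in this commit are bare string literals used as section markers, e.g. """ conv1 """. Python evaluates such a string and immediately discards it unless it is the first statement of a module, class, or function (a docstring), so it behaves like a comment with extra cost, and pylint flags it as pointless-string-statement (W0105). A minimal sketch of the difference, using a hypothetical function:

def conv_block():
    """Kept: the first statement of a function becomes conv_block.__doc__."""
    """ conv1 """  # bare string statement: evaluated, discarded, flagged by pylint
    # conv1        # the fix applied throughout this commit: a real comment
    return None

print(conv_block.__doc__)  # only the leading docstring survives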

File tree: 5 files changed (+51, -55 lines)

- example/tutorial_vgg16.py
- example/tutorial_word2vec_basic.py
- tensorlayer/db.py
- tensorlayer/layers/super_resolution.py
- tensorlayer/prepro.py

example/tutorial_vgg16.py

Lines changed: 20 additions & 16 deletions
@@ -37,10 +37,6 @@
 """
 
 import os
-import sys
-import time
-import numpy as np
-import tensorflow as tf
 import tensorlayer as tl
 from scipy.misc import imread, imresize
 from tensorlayer.layers import *
@@ -60,7 +56,8 @@ def conv_layers(net_in):
     """
     mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32, shape=[1, 1, 1, 3], name='img_mean')
     net_in.outputs = net_in.outputs - mean
-    """ conv1 """
+
+    # conv1
     network = Conv2dLayer(
         net_in,
         act=tf.nn.relu,
@@ -76,7 +73,8 @@ def conv_layers(net_in):
         padding='SAME',
         name='conv1_2')
     network = PoolLayer(network, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', pool=tf.nn.max_pool, name='pool1')
-    """ conv2 """
+
+    # conv2
     network = Conv2dLayer(
         network,
         act=tf.nn.relu,
@@ -92,7 +90,8 @@ def conv_layers(net_in):
         padding='SAME',
         name='conv2_2')
     network = PoolLayer(network, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', pool=tf.nn.max_pool, name='pool2')
-    """ conv3 """
+
+    # conv3
     network = Conv2dLayer(
         network,
         act=tf.nn.relu,
@@ -115,7 +114,8 @@ def conv_layers(net_in):
         padding='SAME',
         name='conv3_3')
     network = PoolLayer(network, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', pool=tf.nn.max_pool, name='pool3')
-    """ conv4 """
+
+    # conv4
     network = Conv2dLayer(
         network,
         act=tf.nn.relu,
@@ -138,7 +138,8 @@ def conv_layers(net_in):
         padding='SAME',
         name='conv4_3')
     network = PoolLayer(network, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', pool=tf.nn.max_pool, name='pool4')
-    """ conv5 """
+
+    # conv5
     network = Conv2dLayer(
         network,
         act=tf.nn.relu,
@@ -173,25 +174,30 @@ def conv_layers_simple_api(net_in):
     """
     mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32, shape=[1, 1, 1, 3], name='img_mean')
     net_in.outputs = net_in.outputs - mean
-    """ conv1 """
+
+    # conv1
     network = Conv2d(net_in, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv1_1')
     network = Conv2d(network, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv1_2')
     network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool1')
-    """ conv2 """
+
+    # conv2
     network = Conv2d(network, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv2_1')
     network = Conv2d(network, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv2_2')
     network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool2')
-    """ conv3 """
+
+    # conv3
     network = Conv2d(network, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv3_1')
     network = Conv2d(network, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv3_2')
     network = Conv2d(network, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv3_3')
     network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool3')
-    """ conv4 """
+
+    # conv4
     network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv4_1')
     network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv4_2')
     network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv4_3')
     network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool4')
-    """ conv5 """
+
+    # conv5
     network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv5_1')
     network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv5_2')
     network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv5_3')
@@ -249,5 +255,3 @@ def fc_layers(net):
 preds = (np.argsort(prob)[::-1])[0:5]
 for p in preds:
     print(class_names[p], prob[p])
-
-#
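
For readers comparing the two tutorial functions touched above: conv_layers uses the verbose Conv2dLayer API, while conv_layers_simple_api uses Conv2d. A minimal sketch of the contrast, assuming the TensorLayer 1.x API this tutorial targets (the shape/strides values are illustrative, matching the VGG conv1 block):

import tensorflow as tf
from tensorlayer.layers import InputLayer, Conv2dLayer, Conv2d

x = tf.placeholder(tf.float32, [None, 224, 224, 3])
net_in = InputLayer(x, name='input')

# Verbose API: the kernel shape is spelled out as [h, w, in_channels, out_channels].
net = Conv2dLayer(net_in, act=tf.nn.relu, shape=[3, 3, 3, 64],
                  strides=[1, 1, 1, 1], padding='SAME', name='conv1_1')

# Simple API: the same layer; the input channel count is inferred.
net2 = Conv2d(net_in, n_filter=64, filter_size=(3, 3), strides=(1, 1),
              act=tf.nn.relu, padding='SAME', name='conv1_1s')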

example/tutorial_word2vec_basic.py

Lines changed: 21 additions & 26 deletions
@@ -37,10 +37,6 @@
 
 """
 
-import collections
-import math
-import os
-import random
 import time
 
 import numpy as np
@@ -56,10 +52,9 @@
 def main_word2vec_basic():
     # sess = tf.InteractiveSession()
     sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
-    """ Step 1: Download the data, read the context into a list of strings.
-    Set hyperparameters.
-    """
 
+    # Step 1: Download the data, read the context into a list of strings.
+    # Set hyperparameters.
     words = tl.files.load_matt_mahoney_text8_dataset()
     data_size = len(words)
     print(data_size)  # 17005207
@@ -126,8 +121,8 @@ def main_word2vec_basic():
     print('%d Steps in a Epoch, total Epochs %d' % (int(data_size / batch_size), n_epoch))
     print(' learning_rate: %f' % learning_rate)
     print(' batch_size: %d' % batch_size)
-    """ Step 2: Build the dictionary and replace rare words with 'UNK' token.
-    """
+
+    # Step 2: Build the dictionary and replace rare words with 'UNK' token.
     print()
     if resume:
         print("Load existing data and dictionaries" + "!" * 10)
@@ -146,21 +141,21 @@ def main_word2vec_basic():
     ])  # [5243, 3081, 12, 6, 195, 2, 3135, 46, 59, 156] [b'anarchism', b'originated', b'as', b'a', b'term', b'of', b'abuse', b'first', b'used', b'against']
 
     del words  # Hint to reduce memory.
-    """ Step 3: Function to generate a training batch for the Skip-Gram model.
-    """
+
+    # Step 3: Function to generate a training batch for the Skip-Gram model.
     print()
-    data_index = 0
+
     batch, labels, data_index = tl.nlp.generate_skip_gram_batch(data=data, batch_size=8, num_skips=4, skip_window=2, data_index=0)
     for i in range(8):
         print(batch[i], reverse_dictionary[batch[i]], '->', labels[i, 0], reverse_dictionary[labels[i, 0]])
 
     batch, labels, data_index = tl.nlp.generate_skip_gram_batch(data=data, batch_size=8, num_skips=2, skip_window=1, data_index=0)
     for i in range(8):
         print(batch[i], reverse_dictionary[batch[i]], '->', labels[i, 0], reverse_dictionary[labels[i, 0]])
-    # exit()
-    """ Step 4: Build a Skip-Gram model.
-    """
+
+    # Step 4: Build a Skip-Gram model.
     print()
+
     # We pick a random validation set to sample nearest neighbors. Here we limit the
     # validation samples to the words that have a low numeric ID, which by
     # construction are also the most frequent.
@@ -208,9 +203,10 @@ def main_word2vec_basic():
     similarity = tf.matmul(valid_embed, normalized_embeddings, transpose_b=True)
     # multiply all valid word vector with all word vector.
     # transpose_b=True, normalized_embeddings is transposed before multiplication.
-    """ Step 5: Start training.
-    """
+
+    # Step 5: Start training.
     print()
+
     tl.layers.initialize_global_variables(sess)
     if resume:
         print("Load existing model" + "!" * 10)
@@ -229,7 +225,7 @@ def main_word2vec_basic():
     average_loss = 0
     step = 0
     print_freq = 2000
-    while (step < num_steps):
+    while step < num_steps:
         start_time = time.time()
         batch_inputs, batch_labels, data_index = tl.nlp.generate_skip_gram_batch(
             data=data, batch_size=batch_size, num_skips=num_skips, skip_window=skip_window, data_index=data_index)
@@ -279,16 +275,17 @@ def main_word2vec_basic():
             # learning_rate = float(input("Input new learning rate: "))
             # train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
         step += 1
-    """ Step 6: Visualize the normalized embedding matrix by t-SNE.
-    """
+
+    # Step 6: Visualize the normalized embedding matrix by t-SNE.
     print()
+
     final_embeddings = sess.run(normalized_embeddings)  # .eval()
     tl.visualize.tsne_embedding(final_embeddings, reverse_dictionary, plot_only=500, second=5, saveable=False, name='word2vec_basic')
-    """ Step 7: Evaluate by analogy questions.
-    see tensorflow/models/embedding/word2vec_optimized.py
-    """
+
+    # Step 7: Evaluate by analogy questions. see tensorflow/models/embedding/word2vec_optimized.py
     print()
-    # from tensorflow/models/embedding/word2vec.py
+
+    # from tensorflow/models/embedding/word2vec.py
     analogy_questions = tl.nlp.read_analogies_file( \
         eval_file='questions-words.txt', word2id=dictionary)
     # The eval feeds three vectors of word ids for a, b, c, each of
@@ -352,5 +349,3 @@ def predict(analogy):
 
 if __name__ == '__main__':
     main_word2vec_basic()
-
-#
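
The two generate_skip_gram_batch calls above print sample (centre, context) pairs. A hypothetical pure-Python sketch of the batching logic, only to show what batch_size, num_skips, and skip_window control (this is not TensorLayer's implementation):

import random

def skip_gram_batch(data, batch_size, num_skips, skip_window, data_index):
    # Each centre word contributes num_skips (centre, context) pairs drawn
    # from the 2 * skip_window words around it.
    assert batch_size % num_skips == 0 and num_skips <= 2 * skip_window
    batch, labels = [], []
    while len(batch) < batch_size:
        centre = data_index + skip_window
        window = [i for i in range(data_index, data_index + 2 * skip_window + 1) if i != centre]
        for ctx in random.sample(window, num_skips):
            batch.append(data[centre])
            labels.append(data[ctx])
        data_index = (data_index + 1) % (len(data) - 2 * skip_window)
    return batch, labels, data_index

ids = [5243, 3081, 12, 6, 195, 2, 3135, 46, 59, 156]  # word ids from the tutorial output
batch, labels, idx = skip_gram_batch(ids, batch_size=8, num_skips=4, skip_window=2, data_index=0)
print(list(zip(batch, labels)))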

tensorlayer/db.py

Lines changed: 0 additions & 1 deletion
@@ -335,7 +335,6 @@ def load_model_architecture(self, args=None):
             return False, False
         try:
             archs = self.archfs.get(fid).read()
-            '''print("[TensorDB] Find one params SUCCESS, {} took: {}s".format(args, round(time.time()-s, 2)))'''
             return archs, fid
         except Exception as e:
             print("exception")

tensorlayer/layers/super_resolution.py

Lines changed: 7 additions & 8 deletions
@@ -72,14 +72,13 @@ def subpixel_conv2d(net, scale=2, n_out_channel=None, act=tf.identity, name='sub
     def _PS(X, r, n_out_channels):
         if n_out_channels >= 1:
             assert int(X.get_shape()[-1]) == (r**2) * n_out_channels, _err_log
-            """
-            bsize, a, b, c = X.get_shape().as_list()
-            bsize = tf.shape(X)[0] # Handling Dimension(None) type for undefined batch dim
-            Xs=tf.split(X,r,3) #b*h*w*r*r
-            Xr=tf.concat(Xs,2) #b*h*(r*w)*r
-            X=tf.reshape(Xr,(bsize,r*a,r*b,n_out_channel)) # b*(r*h)*(r*w)*c
-
-            """
+
+            # bsize, a, b, c = X.get_shape().as_list()
+            # bsize = tf.shape(X)[0] # Handling Dimension(None) type for undefined batch dim
+            # Xs=tf.split(X,r,3) #b*h*w*r*r
+            # Xr=tf.concat(Xs,2) #b*h*(r*w)*r
+            # X=tf.reshape(Xr,(bsize,r*a,r*b,n_out_channel)) # b*(r*h)*(r*w)*c
+
             X = tf.depth_to_space(X, r)
         else:
             logging.info(_err_log)
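
The block converted to comments above is a hand-rolled pixel shuffle that had been kept as a pointless string; the live code path already delegates to tf.depth_to_space, which performs the same rearrangement. A quick shape check, written against the TF 1.x session style this code base uses:

import numpy as np
import tensorflow as tf

r = 2  # upscale factor
X = tf.constant(np.zeros((1, 3, 3, 8), dtype=np.float32))
Y = tf.depth_to_space(X, r)  # groups of r**2 channels become r x r spatial blocks

with tf.Session() as sess:
    print(sess.run(tf.shape(Y)))  # [1 6 6 2]: H and W scaled by r, C divided by r**2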

tensorlayer/prepro.py

Lines changed: 3 additions & 4 deletions
@@ -2738,7 +2738,7 @@ def _get_coord(coord):
         if is_center:
             coord = obj_box_coord_centroid_to_upleft(coord)
 
-        ##======= pixel unit format and upleft, w, h ==========##
+        # ======= pixel unit format and upleft, w, h ==========
         x = (coord[0] - im.shape[1] / 2) / zy + im.shape[1] / 2  # only change this
         y = (coord[1] - im.shape[0] / 2) / zx + im.shape[0] / 2  # only change this
         w = coord[2] / zy  # only change this
@@ -2776,7 +2776,7 @@ def _get_coord(coord):
 
         coord = [x, y, w, h]
 
-        ## convert back if input format is center.
+        # convert back if input format is center.
         if is_center:
             coord = obj_box_coord_upleft_to_centroid(coord)
 
@@ -2788,7 +2788,7 @@ def _get_coord(coord):
         coord = coords[i]
         assert len(coord) == 4, "coordinate should be 4 values : [x, y, w, h]"
         if is_rescale:
-            """ for scaled coord, upscaled before process and scale back in the end. """
+            # for scaled coord, upscaled before process and scale back in the end.
             coord = obj_box_coord_scale_to_pixelunit(coord, im.shape)
             coord = _get_coord(coord)
             if coord is not None:
@@ -2803,7 +2803,6 @@ def _get_coord(coord):
     return im_new, classes_new, coords_new
 
 
-## Sequence
 def pad_sequences(sequences, maxlen=None, dtype='int32', padding='post', truncating='pre', value=0.):
     """Pads each sequence to the same length:
     the length of the longest sequence.
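
The helpers named in these hunks convert between the two box formats the comments mention. A hypothetical standalone sketch of those conversions (not TensorLayer's code), for [x, y, w, h] boxes in pixel units:

def centroid_to_upleft(coord):
    # [x_centre, y_centre, w, h] -> [x_upperleft, y_upperleft, w, h]
    x_c, y_c, w, h = coord
    return [x_c - w / 2.0, y_c - h / 2.0, w, h]

def upleft_to_centroid(coord):
    # [x_upperleft, y_upperleft, w, h] -> [x_centre, y_centre, w, h]
    x, y, w, h = coord
    return [x + w / 2.0, y + h / 2.0, w, h]

print(centroid_to_upleft([50, 40, 20, 10]))  # [40.0, 35.0, 20, 10]
print(upleft_to_centroid([40, 35, 20, 10]))  # [50.0, 40.0, 20, 10]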
