
Commit 98291f0

DEKHTIARJonathan authored and zsdonghao committed

[WIP] - Documentation Cleaning and Coding Style (#516)

* Activation Cleaning Docstring Test
* Requirements pinned with ranges to ensure tested versions are used. Ranges are used to prevent updating requirements all the time.
* setup.cfg file added with PEP8 configuration
* activation.py refactored
* docstring fixed - ready for documentation unittest
* Yapf correction for max_line_length: 120
* test yapf refactored
* Requirements conflict solved
* Yapf Style modified and merged in file "setup.cfg"
* Yapf Configuration Updated
* Code Refactored with new YAPF formatting style
* Code Refactored with new YAPF formatting style
* Code Refactored with new YAPF formatting style
* tl.layers.pooling YAPF reformat
* yapf updated
* gitignore updated
* YAPF Style Fixing Attempt
* Space Error Fix
* Style Correction
* Assertion Codacy Errors Corrected
* Error Fix
* Assertion Refactored
* YAPF Style Applied to Master

1 parent 7bbe41d commit 98291f0
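
The commit message above pins the requirements to version ranges so that the tested versions keep being installed without bumping exact pins on every release. As a minimal sketch of that idea (the package names and bounds below are hypothetical, not the actual contents of this PR's requirement files), a setup.py can declare range pins like this:

# Hypothetical sketch of range-pinned dependencies; names and bounds are
# illustrative only, not taken from this PR.
# Lower bound: oldest tested release; upper bound: the next, untested release line.
from setuptools import setup

setup(
    name="example-project",
    version="0.1.0",
    install_requires=[
        "numpy>=1.14,<1.15",         # accept any 1.14.x patch release
        "scipy>=1.0,<1.1",
        "scikit-image>=0.13,<0.14",
    ],
)

Keeping an upper bound below the next minor release means patch updates are picked up automatically while untested minor/major releases stay out until the range is widened deliberately.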

File tree: 86 files changed, +2084 / -1061 lines changed


.gitignore

Lines changed: 2 additions & 0 deletions

@@ -12,3 +12,5 @@ tensorlayer.egg-info
 tensorlayer/__pacache__
 venv/
 .pytest_cache/
+update_tl.bat
+update_tl.py

.style.yapf

Lines changed: 58 additions & 2 deletions

@@ -1,4 +1,60 @@
 [style]
-based_on_style = pep8
+based_on_style=google
+
+# The number of columns to use for indentation.
 indent_width = 4
-column_limit = 160
+
+# The column limit.
+column_limit=120
+
+# Place each dictionary entry onto its own line.
+each_dict_entry_on_separate_line = True
+
+# Put closing brackets on a separate line, dedented, if the bracketed
+# expression can't fit in a single line. Applies to all kinds of brackets,
+# including function definitions and calls. For example:
+#
+#   config = {
+#       'key1': 'value1',
+#       'key2': 'value2',
+#   }  # <--- this bracket is dedented and on a separate line
+#
+#   time_series = self.remote_client.query_entity_counters(
+#       entity='dev3246.region1',
+#       key='dns.query_latency_tcp',
+#       transform=Transformation.AVERAGE(window=timedelta(seconds=60)),
+#       start_ts=now()-timedelta(days=3),
+#       end_ts=now(),
+#   )  # <--- this bracket is dedented and on a separate line
+dedent_closing_brackets=True
+
+# Do not split consecutive brackets. Only relevant when DEDENT_CLOSING_BRACKETS is set.
+coalesce_brackets = False
+
+# Align closing bracket with visual indentation.
+align_closing_bracket_with_visual_indent = False
+
+# Split named assignments onto individual lines.
+split_before_named_assigns = False
+
+# If an argument / parameter list is going to be split, then split before the first argument.
+split_before_first_argument = True
+
+# Split before arguments if the argument list is terminated by a comma.
+split_arguments_when_comma_terminated = False
+
+# Insert a space between the ending comma and closing bracket of a list, etc.
+space_between_ending_comma_and_closing_bracket = True
+
+# Join short lines into one line. E.g., single line if statements.
+join_multiple_lines = True
+
+# Do not include spaces around selected binary operators.
+# Example: 1 + 2 * 3 - 4 / 5  =>  1 + 2*3 - 4/5
+no_spaces_around_selected_binary_operators = True
+
+# Allow lambdas to be formatted on more than one line.
+allow_multiline_lambdas = True
+
+SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT = 10
+SPLIT_PENALTY_AFTER_OPENING_BRACKET = 500
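
The .style.yapf file above is what YAPF reads when reformatting the code base. As a small sketch of how the style can be applied (assuming the yapf package is installed; the snippet is illustrative and not part of this commit), an over-long line like the DenseLayer calls rewrapped in the example scripts can be run through yapf's Python API against this configuration:

# Minimal sketch: format a snippet with the repository's YAPF style file.
# Assumes `pip install yapf` and that '.style.yapf' (added above) is in the
# current working directory; the source line is only an example.
from yapf.yapflib.yapf_api import FormatCode

long_line = (
    "net = DenseLayer(net, n_units=10, act=tf.identity, "
    "W_init=tf.truncated_normal_initializer(stddev=1 / 192.0), name='output')  # output: (batch_size, 10)\n"
)

# In recent yapf versions FormatCode returns (formatted_code, changed).
formatted, changed = FormatCode(long_line, style_config='.style.yapf')
print(formatted)  # line split at column_limit=120, closing bracket dedented onto its own line

The command-line equivalent is roughly: yapf --in-place --recursive --style=.style.yapf . , which is the kind of pass that produced the wrapped, dedented call sites visible in the file diffs below.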

docs/index.rst

Lines changed: 1 addition & 0 deletions

@@ -54,6 +54,7 @@ method, this part of the documentation is for you.
    modules/activation
    modules/models
    modules/distributed
+   modules/db
 
 
 Command-line Reference

example/tutorial_atari_pong.py

Lines changed: 6 additions & 2 deletions

@@ -133,7 +133,11 @@ def prepro(I):
        prev_x = None
 
    if reward != 0:
-        print(('episode %d: game %d took %.5fs, reward: %f' % (episode_number, game_number, time.time() - start_time, reward)),
-              ('' if reward == -1 else ' !!!!!!!!'))
+        print(
+            (
+                'episode %d: game %d took %.5fs, reward: %f' %
+                (episode_number, game_number, time.time() - start_time, reward)
+            ), ('' if reward == -1 else ' !!!!!!!!')
+        )
        start_time = time.time()
        game_number += 1

example/tutorial_binarynet_cifar10_tfrecord.py

Lines changed: 42 additions & 15 deletions

@@ -83,7 +83,9 @@ def data_to_tfrecord(images, labels, filename):
            feature={
                "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),
                'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),
-            }))
+            }
+        )
+    )
    writer.write(example.SerializeToString())  # Serialize To String
    writer.close()
 
@@ -97,12 +99,13 @@ def read_and_decode(filename, is_train=None):
        serialized_example, features={
            'label': tf.FixedLenFeature([], tf.int64),
            'img_raw': tf.FixedLenFeature([], tf.string),
-        })
+        }
+    )
    # You can do more image distortion here for training data
    img = tf.decode_raw(features['img_raw'], tf.float32)
    img = tf.reshape(img, [32, 32, 3])
    # img = tf.cast(img, tf.float32) #* (1. / 255) - 0.5
-    if is_train == True:
+    if is_train ==True:
        # 1. Randomly crop a [height, width] section of the image.
        img = tf.random_crop(img, [24, 24, 3])
        # 2. Randomly flip the image horizontally.
@@ -147,9 +150,12 @@ def read_and_decode(filename, is_train=None):
 x_test_, y_test_ = read_and_decode("test.cifar10", False)
 
 x_train_batch, y_train_batch = tf.train.shuffle_batch(
-    [x_train_, y_train_], batch_size=batch_size, capacity=2000, min_after_dequeue=1000, num_threads=32)  # set the number of threads here
+    [x_train_, y_train_], batch_size=batch_size, capacity=2000, min_after_dequeue=1000, num_threads=32
+)  # set the number of threads here
 # for testing, uses batch instead of shuffle_batch
-x_test_batch, y_test_batch = tf.train.batch([x_test_, y_test_], batch_size=batch_size, capacity=50000, num_threads=32)
+x_test_batch, y_test_batch = tf.train.batch(
+    [x_test_, y_test_], batch_size=batch_size, capacity=50000, num_threads=32
+)
 
 def model(x_crop, y_, reuse):
     """ For more simplified CNN APIs, check tensorlayer.org """
@@ -161,16 +167,28 @@ def model(x_crop, y_, reuse):
    net = tl.layers.Conv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', W_init=W_init, name='cnn1')
    net = tl.layers.SignLayer(net)
    net = tl.layers.MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool1')
-    net = tl.layers.LocalResponseNormLayer(net, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')
-    net = tl.layers.BinaryConv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', W_init=W_init, name='cnn2')
-    net = tl.layers.LocalResponseNormLayer(net, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
+    net = tl.layers.LocalResponseNormLayer(
+        net, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1'
+    )
+    net = tl.layers.BinaryConv2d(
+        net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', W_init=W_init, name='cnn2'
+    )
+    net = tl.layers.LocalResponseNormLayer(
+        net, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2'
+    )
    net = tl.layers.MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool2')
    net = tl.layers.FlattenLayer(net, name='flatten')  # output: (batch_size, 2304)
    net = tl.layers.SignLayer(net)
-    net = tl.layers.BinaryDenseLayer(net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu')  # output: (batch_size, 384)
+    net = tl.layers.BinaryDenseLayer(
+        net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu'
+    )  # output: (batch_size, 384)
    net = tl.layers.SignLayer(net)
-    net = tl.layers.BinaryDenseLayer(net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu')  # output: (batch_size, 192)
-    net = tl.layers.DenseLayer(net, n_units=10, act=tf.identity, W_init=W_init2, name='output')  # output: (batch_size, 10)
+    net = tl.layers.BinaryDenseLayer(
+        net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu'
+    )  # output: (batch_size, 192)
+    net = tl.layers.DenseLayer(
+        net, n_units=10, act=tf.identity, W_init=W_init2, name='output'
+    )  # output: (batch_size, 10)
    y = net.outputs
 
    ce = tl.cost.cross_entropy(y, y_, name='cost')
@@ -201,9 +219,15 @@ def model_batch_norm(x_crop, y_, reuse, is_train):
    net = tl.layers.BatchNormLayer(net, is_train, act=tf.nn.relu, name='batch2')
    net = tl.layers.MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool2')
    net = tl.layers.FlattenLayer(net, name='flatten')  # output: (batch_size, 2304)
-    net = tl.layers.DenseLayer(net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu')  # output: (batch_size, 384)
-    net = tl.layers.DenseLayer(net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu')  # output: (batch_size, 192)
-    net = tl.layers.DenseLayer(net, n_units=10, act=tf.identity, W_init=W_init2, name='output')  # output: (batch_size, 10)
+    net = tl.layers.DenseLayer(
+        net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu'
+    )  # output: (batch_size, 384)
+    net = tl.layers.DenseLayer(
+        net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu'
+    )  # output: (batch_size, 192)
+    net = tl.layers.DenseLayer(
+        net, n_units=10, act=tf.identity, W_init=W_init2, name='output'
+    )  # output: (batch_size, 10)
    y = net.outputs
 
    ce = tl.cost.cross_entropy(y, y_, name='cost')
@@ -273,7 +297,10 @@ def model_batch_norm(x_crop, y_, reuse, is_train):
            n_batch += 1
 
        if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
-            print("Epoch %d : Step %d-%d of %d took %fs" % (epoch, step, step + n_step_epoch, n_step, time.time() - start_time))
+            print(
+                "Epoch %d : Step %d-%d of %d took %fs" %
+                (epoch, step, step + n_step_epoch, n_step, time.time() - start_time)
+            )
            print("   train loss: %f" % (train_loss / n_batch))
            print("   train acc: %f" % (train_acc / n_batch))
 

example/tutorial_bipedalwalker_a3c_continuous_action.py

Lines changed: 12 additions & 3 deletions

@@ -67,6 +67,7 @@
 
 
 class ACNet(object):
+
     def __init__(self, scope, globalAC=None):
         self.scope = scope
         if scope == GLOBAL_NET_SCOPE:
@@ -144,7 +145,8 @@ def _build_net(self):
        self.v = v.outputs
 
    def update_global(self, feed_dict):  # run by a local
-        _, _, t = sess.run([self.update_a_op, self.update_c_op, self.test], feed_dict)  # local grads applies to global net
+        _, _, t = sess.run([self.update_a_op, self.update_c_op, self.test],
+                           feed_dict)  # local grads applies to global net
        return t
 
    def pull_global(self):  # run by a local
@@ -156,14 +158,18 @@ def choose_action(self, s):  # run by a local
 
    def save_ckpt(self):
        tl.files.exists_or_mkdir(self.scope)
-        tl.files.save_ckpt(sess=sess, mode_name='model.ckpt', var_list=self.a_params + self.c_params, save_dir=self.scope, printable=True)
+        tl.files.save_ckpt(
+            sess=sess, mode_name='model.ckpt', var_list=self.a_params + self.c_params, save_dir=self.scope,
+            printable=True
+        )
 
    def load_ckpt(self):
        tl.files.load_ckpt(sess=sess, var_list=self.a_params + self.c_params, save_dir=self.scope, printable=True)
        # tl.files.load_ckpt(sess=sess, mode_name='model.ckpt', var_list=self.a_params+self.c_params, save_dir=self.scope, is_latest=False, printable=True)
 
 
 class Worker(object):
+
     def __init__(self, name, globalAC):
         self.env = gym.make(GAME)
         self.name = name
@@ -202,7 +208,10 @@ def work(self):
                buffer_v_target.append(v_s_)
                buffer_v_target.reverse()
 
-                buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(buffer_v_target)
+                buffer_s, buffer_a, buffer_v_target = (
+                    np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(buffer_v_target)
+                )
+
                feed_dict = {
                    self.AC.s: buffer_s,
                    self.AC.a_his: buffer_a,

example/tutorial_cartpole_ac.py

Lines changed: 12 additions & 3 deletions

@@ -69,6 +69,7 @@
 
 
 class Actor(object):
+
     def __init__(self, sess, n_features, n_actions, lr=0.001):
         self.sess = sess
         self.s = tf.placeholder(tf.float32, [1, n_features], "state")
@@ -85,7 +86,9 @@ def __init__(self, sess, n_features, n_actions, lr=0.001):
 
        # Hao Dong
        with tf.variable_scope('loss'):
-            self.exp_v = tl.rein.cross_entropy_reward_loss(logits=self.acts_logits, actions=self.a, rewards=self.td_error, name='actor_weighted_loss')
+            self.exp_v = tl.rein.cross_entropy_reward_loss(
+                logits=self.acts_logits, actions=self.a, rewards=self.td_error, name='actor_weighted_loss'
+            )
 
        with tf.variable_scope('train'):
            self.train_op = tf.train.AdamOptimizer(lr).minimize(self.exp_v)
@@ -113,6 +116,7 @@ def choose_action_greedy(self, s):
 
 
 class Critic(object):
+
     def __init__(self, sess, n_features, lr=0.01):
         self.sess = sess
         self.s = tf.placeholder(tf.float32, [1, n_features], "state")
@@ -143,7 +147,9 @@ def learn(self, s, r, s_):
 sess = tf.Session()
 
 actor = Actor(sess, n_features=N_F, n_actions=N_A, lr=LR_A)
-critic = Critic(sess, n_features=N_F, lr=LR_C)  # we need a good teacher, so the teacher should learn faster than the actor
+critic = Critic(
+    sess, n_features=N_F, lr=LR_C
+)  # we need a good teacher, so the teacher should learn faster than the actor
 
 tl.layers.initialize_global_variables(sess)
 
@@ -187,7 +193,10 @@ def learn(self, s, r, s_):
        running_reward = running_reward * 0.95 + ep_rs_sum * 0.05
        # start rending if running_reward greater than a threshold
        # if running_reward > DISPLAY_REWARD_THRESHOLD: RENDER = True
-        print("Episode: %d reward: %f running_reward %f took: %.5f" % (i_episode, ep_rs_sum, running_reward, time.time() - episode_time))
+        print(
+            "Episode: %d reward: %f running_reward %f took: %.5f" %
+            (i_episode, ep_rs_sum, running_reward, time.time() - episode_time)
+        )
 
        # Early Stopping for quick check
        if t >= MAX_EP_STEPS:

example/tutorial_cifar10.py

Lines changed: 22 additions & 12 deletions

@@ -44,9 +44,15 @@ def model(x, y_, reuse):
    # net = PoolLayer(net, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
    #         padding='SAME', pool = tf.nn.max_pool, name ='pool2')  # output: (batch_size, 6, 6, 64)
    net = FlattenLayer(net, name='flatten')  # output: (batch_size, 2304)
-    net = DenseLayer(net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu')  # output: (batch_size, 384)
-    net = DenseLayer(net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu')  # output: (batch_size, 192)
-    net = DenseLayer(net, n_units=10, act=tf.identity, W_init=tf.truncated_normal_initializer(stddev=1 / 192.0), name='output')  # output: (batch_size, 10)
+    net = DenseLayer(
+        net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu'
+    )  # output: (batch_size, 384)
+    net = DenseLayer(
+        net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu'
+    )  # output: (batch_size, 192)
+    net = DenseLayer(
+        net, n_units=10, act=tf.identity, W_init=tf.truncated_normal_initializer(stddev=1 / 192.0), name='output'
+    )  # output: (batch_size, 10)
    y = net.outputs
 
    ce = tl.cost.cross_entropy(y, y_, name='cost')
@@ -89,9 +95,15 @@ def model_batch_norm(x, y_, reuse, is_train):
    #         padding='SAME', pool = tf.nn.max_pool, name ='pool2')  # output: (batch_size, 6, 6, 64)
 
    net = FlattenLayer(net, name='flatten')  # output: (batch_size, 2304)
-    net = DenseLayer(net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu')  # output: (batch_size, 384)
-    net = DenseLayer(net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu')  # output: (batch_size, 192)
-    net = DenseLayer(net, n_units=10, act=tf.identity, W_init=tf.truncated_normal_initializer(stddev=1 / 192.0), name='output')  # output: (batch_size, 10)
+    net = DenseLayer(
+        net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu'
+    )  # output: (batch_size, 384)
+    net = DenseLayer(
+        net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu'
+    )  # output: (batch_size, 192)
+    net = DenseLayer(
+        net, n_units=10, act=tf.identity, W_init=tf.truncated_normal_initializer(stddev=1 / 192.0), name='output'
+    )  # output: (batch_size, 10)
    y = net.outputs
 
    ce = tl.cost.cross_entropy(y, y_, name='cost')
@@ -136,11 +148,8 @@ def distort_fn(x, is_train=False):
    return x
 
 
-x = tf.placeholder(tf.float32, shape=[None, 24, 24, 3], name='x')
-y_ = tf.placeholder(
-    tf.int64, shape=[
-        None,
-    ], name='y_')
+x = tf.placeholder(dtype=tf.float32, shape=[None, 24, 24, 3], name='x')
+y_ = tf.placeholder(dtype=tf.int64, shape=[None], name='y_')
 
 ## using local response normalization
 # network, cost, _ = model(x, y_, False)
@@ -156,7 +165,8 @@ def distort_fn(x, is_train=False):
 batch_size = 128
 
 train_params = network.all_params
-train_op = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08, use_locking=False).minimize(cost, var_list=train_params)
+train_op = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08,
+                                  use_locking=False).minimize(cost, var_list=train_params)
 
 tl.layers.initialize_global_variables(sess)
 