Commit ecd4e6c

pyup-bot authored and Jonathan DEKHTIAR committed
Dependency and YAPF Style Update (#808)
* Pin yapf to latest version 0.23.0
* Update requirements_test.txt
* Update CHANGELOG.md
* YAPF Cleaning
1 parent 97d4695 commit ecd4e6c

15 files changed: +25 -26 lines changed
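
The YAPF half of this commit is mechanical whitespace normalization. As a rough sketch of how the cleanup below can be reproduced through yapf's Python API (the yapf>=0.22,<0.24 range is the pin this commit introduces; the sample snippet and the 'pep8' style argument are illustrative assumptions, not the project's actual configuration):

    # A minimal sketch, assuming yapf>=0.22,<0.24 and the default 'pep8' base style.
    from yapf.yapflib.yapf_api import FormatCode

    snippet = "if is_train ==True:\n    img = tf.random_crop(img, [24, 24, 3])\n"

    # FormatCode returns a (reformatted_source, changed) pair in this yapf range.
    formatted, changed = FormatCode(snippet, style_config='pep8')
    print(formatted)  # prints the snippet with spaces around '==', as in the hunks below
    print(changed)    # True, since the input needed whitespace fixes

Note that yapf adjusts formatting only, never semantics: comparisons like '== True' (idiomatic Python would be plain 'if is_train:') survive the cleanup with their spacing fixed, which is exactly what the hunks below show.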

CHANGELOG.md

Lines changed: 3 additions & 4 deletions
@@ -134,13 +134,12 @@ To release a new version, please update the changelog as followed:
 
 - tensorflow>=1.8,<1.9 => tensorflow>=1.6,<1.11 (PR #739 and PR #798)
 - tensorflow-gpu>=1.8,<1.9 => tensorflow-gpu>=1.6,<1.11 (PR #739 and PR #798)
-- pymongo>=3.6,<3.7 => pymongo>=3.6,<3.8 (PR #750)
 - numpy>=1.14,<1.15 => numpy>=1.14,<1.16 (PR #754)
-- tqdm>=4.23,<4.24 => tqdm>=4.23,<4.25 (PR #798)
+- pymongo>=3.6,<3.7 => pymongo>=3.6,<3.8 (PR #750)
 - pytest>=3.6,<3.7 => tqdm>=3.6,<3.8 (PR #798)
-- yapf>=0.21,<0.22 => yapf>=0.21,<0.23 (PR #798)
-- tqdm>=4.23,<4.25 => tqdm>=4.23,<4.26 (PR #798)
 - pytest-xdist>=1.22,<1.23 => pytest-xdist>=1.22,<1.24 (PR #805 and #806)
+- tqdm>=4.23,<4.25 => tqdm>=4.23,<4.26 (PR #798)
+- yapf>=0.21,<0.22 => yapf>=0.22,<0.24 (PR #798 #808)
 
 ### Contributors

example/tutorial_binarynet_cifar10_tfrecord.py

Lines changed: 1 addition & 1 deletion
@@ -106,7 +106,7 @@ def read_and_decode(filename, is_train=None):
     img = tf.decode_raw(features['img_raw'], tf.float32)
     img = tf.reshape(img, [32, 32, 3])
     # img = tf.cast(img, tf.float32) #* (1. / 255) - 0.5
-    if is_train ==True:
+    if is_train == True:
         # 1. Randomly crop a [height, width] section of the image.
         img = tf.random_crop(img, [24, 24, 3])

example/tutorial_bipedalwalker_a3c_continuous_action.py

Lines changed: 2 additions & 2 deletions
@@ -194,7 +194,7 @@ def work(self):
            s_, r, done, _info = self.env.step(a)
 
            # set robot falls reward to -2 instead of -100
-           if r== -100: r = -2
+           if r == -100: r = -2
 
            ep_r += r
            buffer_s.append(s)
@@ -211,7 +211,7 @@ def work(self):
                buffer_v_target = []
 
                for r in buffer_r[::-1]:  # reverse buffer r
-                   v_s_ = r+ GAMMA * v_s_
+                   v_s_ = r + GAMMA * v_s_
                    buffer_v_target.append(v_s_)
                buffer_v_target.reverse()
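
For context, the second hunk above sits in the A3C worker's return computation, which scans the reward buffer backwards to build discounted value targets. A standalone sketch of that scan, with made-up GAMMA and reward values rather than anything from the tutorial's run:

    # Reverse discounted-return scan, as in the hunk above (illustrative values).
    GAMMA = 0.9                     # discount factor
    buffer_r = [1.0, 0.0, -2.0]     # per-step rewards, oldest first
    v_s_ = 0.0                      # bootstrap value of the state after the last step

    buffer_v_target = []
    for r in buffer_r[::-1]:        # walk the rewards newest-first
        v_s_ = r + GAMMA * v_s_     # v(t) = r(t) + GAMMA * v(t+1)
        buffer_v_target.append(v_s_)
    buffer_v_target.reverse()       # restore oldest-first order
    print(buffer_v_target)          # [-0.62, -1.8, -2.0], up to float rounding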

example/tutorial_cartpole_ac.py

Lines changed: 1 addition & 1 deletion
@@ -133,7 +133,7 @@ def __init__(self, sess, n_features, lr=0.01):
 
        with tf.variable_scope('squared_TD_error'):
            # TD_error = r + lambd * V(newS) - V(S)
-           self.td_error = self.r+ LAMBDA * self.v_ - self.v
+           self.td_error = self.r + LAMBDA * self.v_ - self.v
            self.loss = tf.square(self.td_error)
 
        with tf.variable_scope('train'):

example/tutorial_cifar10_tfrecord.py

Lines changed: 1 addition & 1 deletion
@@ -107,7 +107,7 @@ def read_and_decode(filename, is_train=None):
     img = tf.decode_raw(features['img_raw'], tf.float32)
     img = tf.reshape(img, [32, 32, 3])
     # img = tf.cast(img, tf.float32) #* (1. / 255) - 0.5
-    if is_train ==True:
+    if is_train == True:
         # 1. Randomly crop a [height, width] section of the image.
         img = tf.random_crop(img, [24, 24, 3])

example/tutorial_dorefanet_cifar10_tfrecord.py

Lines changed: 1 addition & 1 deletion
@@ -106,7 +106,7 @@ def read_and_decode(filename, is_train=None):
     img = tf.decode_raw(features['img_raw'], tf.float32)
     img = tf.reshape(img, [32, 32, 3])
     # img = tf.cast(img, tf.float32) #* (1. / 255) - 0.5
-    if is_train ==True:
+    if is_train == True:
         # 1. Randomly crop a [height, width] section of the image.
         img = tf.random_crop(img, [24, 24, 3])

example/tutorial_frozenlake_dqn.py

Lines changed: 3 additions & 3 deletions
@@ -79,7 +79,7 @@ def to_one_hot(i, n_classes=None):
        ## Choose an action by greedily (with e chance of random action) from the Q-network
        a, allQ = sess.run([predict, y], feed_dict={inputs: [to_one_hot(s, 16)]})
        ## e-Greedy Exploration !!! sample random action
-       if np.random.rand(1) <e:
+       if np.random.rand(1) < e:
            a[0] = env.action_space.sample()
        ## Get new state and reward from environment
        s1, r, d, _ = env.step(a[0])
@@ -88,7 +88,7 @@ def to_one_hot(i, n_classes=None):
        ## Obtain maxQ' and set our target value for chosen action.
        maxQ1 = np.max(Q1)  # in Q-Learning, policy is greedy, so we use "max" to select the next action.
        targetQ = allQ
-       targetQ[0, a[0]] = r+ lambd * maxQ1
+       targetQ[0, a[0]] = r + lambd * maxQ1
        ## Train network using target and predicted Q values
        # it is not real target Q value, it is just an estimation,
        # but check the Q-Learning update formula:
@@ -99,7 +99,7 @@ def to_one_hot(i, n_classes=None):
        rAll += r
        s = s1
        ## Reduce chance of random action if an episode is done.
-       if d ==True:
+       if d == True:
            e = 1. / ((i / 50) + 10)  # reduce e, GLIE: Greey in the limit with infinite Exploration
            break
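
The comments in the second hunk point at the Q-Learning update formula; spelled out, the reformatted line sets the bootstrapped target for the chosen action, with lambd as the discount factor:

    targetQ[0, a[0]] = r + \lambda \max_{a'} Q(s_1, a')

As the inline comments caution, this target is built from the network's own prediction of the next state's value, so it is an estimate rather than the true Q value.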

example/tutorial_frozenlake_q_table.py

Lines changed: 3 additions & 3 deletions
@@ -43,13 +43,13 @@
        ## Get new state and reward from environment
        s1, r, d, _ = env.step(a)
        ## Update Q-Table with new knowledge
-       Q[s, a] = Q[s, a] + lr * (r+ lambd * np.max(Q[s1, :]) - Q[s, a])
+       Q[s, a] = Q[s, a] + lr * (r + lambd * np.max(Q[s1, :]) - Q[s, a])
        rAll += r
        s = s1
-       if d ==True:
+       if d == True:
            break
    rList.append(rAll)
-   running_reward = r if running_reward is None else running_reward * 0.99 +r* 0.01
+   running_reward = r if running_reward is None else running_reward * 0.99 + r * 0.01
    print("Episode [%d/%d] sum reward: %f running reward: %f took: %.5fs %s" % \
        (i, num_episodes, rAll, running_reward, time.time() - episode_time, '' if rAll == 0 else ' !!!!!!!!'))
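
The first line touched in this hunk is the standard tabular Q-learning update; writing lr as the learning rate \alpha and lambd as the discount \gamma:

    Q(s, a) \leftarrow Q(s, a) + \alpha \, (r + \gamma \max_{a'} Q(s_1, a') - Q(s, a))

i.e. the same bootstrapped target as in the DQN example above, blended into the table with step size \alpha.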

example/tutorial_ternaryweight_cifar10_tfrecord.py

Lines changed: 1 addition & 1 deletion
@@ -105,7 +105,7 @@ def read_and_decode(filename, is_train=None):
     img = tf.decode_raw(features['img_raw'], tf.float32)
     img = tf.reshape(img, [32, 32, 3])
     # img = tf.cast(img, tf.float32) #* (1. / 255) - 0.5
-    if is_train ==True:
+    if is_train == True:
         # 1. Randomly crop a [height, width] section of the image.
         img = tf.random_crop(img, [24, 24, 3])

requirements/requirements_test.txt

Lines changed: 1 addition & 1 deletion
@@ -6,4 +6,4 @@ pytest-cache>=1.0,<1.1
 pytest-cov>=2.5,<2.6
 pytest-xdist>=1.22,<1.24
 sphinx>=1.7,<1.8
-yapf>=0.21,<0.23
+yapf>=0.22,<0.24
