"""
Asynchronous advantage actor-critic (A3C) based on
https://github.com/openai/universe-starter-agent
Original paper:
Asynchronous methods for deep reinforcement learning.
https://arxiv.org/abs/1602.01783
"""
import queue
from collections import namedtuple
from threading import Thread

import numpy as np
import scipy.signal
import tensorflow as tf

from a3c_model import Policy
def discount(x, gamma):
    """Discounted cumulative sum along the time axis (most recent last)."""
    return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]
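# For reference, `discount` implements the recurrence
#
#     out[t] = x[t] + gamma * out[t+1],    out[T] = x[T]
#
# by running an IIR filter over the time-reversed sequence. The explicit-loop
# version below is an illustration only (its name is not part of the original
# code and nothing else in this file uses it).
def _discount_reference(x, gamma):
    out = np.zeros(len(x))
    running = 0.0
    for t in reversed(range(len(x))):
        running = x[t] + gamma * running
        out[t] = running
    return out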
Batch = namedtuple('Batch', ('obs', 'a', 'adv', 'r', 'done', 'state'))


def process_rollout(rollout, gamma=0.99, lambda_=1.0):
    """
    Compute the discounted returns and GAE(lambda) advantages for this rollout.
    """
    batch_obs = np.asarray(rollout.observations)
    batch_a = np.asarray(rollout.actions)
    rewards = np.asarray(rollout.rewards)
    vpred_t = np.asarray(rollout.values + [rollout.r])

    # Discounted returns, bootstrapped with the value estimate rollout.r
    rewards_plus_v = np.asarray(rollout.rewards + [rollout.r])
    batch_r = discount(rewards_plus_v, gamma)[:-1]

    # Generalized advantage estimation on the temporal-difference residuals
    delta_t = rewards + gamma * vpred_t[1:] - vpred_t[:-1]
    batch_adv = discount(delta_t, gamma * lambda_)

    state = rollout.states[0]

    return Batch(batch_obs, batch_a, batch_adv, batch_r, rollout.done, state)
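# The advantage computation above, written out (worked equation for reference):
#
#     delta_t = r_t + gamma * V(s_{t+1}) - V(s_t)
#     A_t     = sum_{l >= 0} (gamma * lambda)**l * delta_{t+l}
#
# i.e. exactly discount(delta_t, gamma * lambda_), with the value of the final
# observation taken from the bootstrap value rollout.r.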
class Rollout(object):
    """A (possibly partial) rollout of experience collected by the runner."""
    def __init__(self):
        self.observations = []
        self.actions = []
        self.rewards = []
        self.values = []
        self.states = []
        self.r = 0.0
        self.done = False

    def add(self, obs, action, reward, value, done, state):
        self.observations += [obs]
        self.actions += [action]
        self.rewards += [reward]
        self.values += [value]
        self.states += [state]
        self.done = done

    def extend(self, rollout):
        assert not self.done
        self.observations += rollout.observations
        self.actions += rollout.actions
        self.rewards += rollout.rewards
        self.values += rollout.values
        self.states += rollout.states
        self.r = rollout.r
        self.done = rollout.done
class RunnerThread(Thread):
    """Runs the environment in a separate thread and queues up rollouts."""
    def __init__(self, env, policy, num_local_steps=20):
        super(RunnerThread, self).__init__()
        self.env = env
        self.policy = policy
        self.num_local_steps = num_local_steps
        self.queue = queue.Queue(5)
        self.daemon = True
        self.last_state = None
        self.sess = None
        self.summary_writer = None

    def start_runner(self, sess, summary_writer):
        self.sess = sess
        self.summary_writer = summary_writer
        self.start()

    def run(self):
        with self.sess.as_default():
            rollout_provider = env_runner(
                self.env, self.policy, self.num_local_steps, self.summary_writer
            )
            while True:
                # The original code notes that the timeout is needed to make
                # the workers die together.
                self.queue.put(next(rollout_provider), timeout=600.0)
def env_runner(env, policy, num_local_steps, summary_writer):
    """Run the environment with the given policy and yield partial rollouts."""
    last_obs = env.reset()
    last_state = policy.get_initial_state()
    length = 0
    rewards = 0

    while True:
        terminal_end = False
        rollout = Rollout()

        for _ in range(num_local_steps):
            # Run policy for one step
            action, value, state = policy.act(last_obs, last_state)

            # Perform the action
            obs, reward, done, info = env.step(action.argmax())

            # Add to rollout
            rollout.add(last_obs, action, reward, value, done, last_state)
            length += 1
            rewards += reward

            # Update observation and state
            last_obs = obs
            last_state = state

            if info:
                summary = tf.Summary()
                for k, v in info.items():
                    summary.value.add(tag=k, simple_value=float(v))
                summary_writer.add_summary(summary, policy.global_step.eval())
                summary_writer.flush()

            tag = 'wrapper_config.TimeLimit.max_episode_steps'
            max_length = env.spec.tags.get(tag)
            if done or length >= max_length:
                terminal_end = True
                if (length >= max_length
                        or not env.metadata.get('semantics.autoreset')):
                    last_obs = env.reset()
                last_state = policy.get_initial_state()
                length = 0
                rewards = 0
                break

        # If the rollout was cut off mid-episode, bootstrap the return from
        # the value estimate of the last observation.
        if not terminal_end:
            rollout.r = policy.value(last_obs, last_state)

        yield rollout
class A3C(object):
    def __init__(self, env, task):
        self.env = env
        self.task = task

        # Seed the random number generator for reproducible initialization
        rng = np.random.RandomState(task)

        # Devices
        worker_device = '/job:worker/task:{}/cpu:0'.format(task)
        replica_device = tf.train.replica_device_setter(
            1, worker_device=worker_device
        )

        # Shared network, placed on the parameter server
        with tf.device(replica_device):
            with tf.variable_scope('global'):
                self.network = Policy(env.observation_space.shape,
                                      env.action_space.n, rng=rng)
                init = tf.constant_initializer(0, dtype=tf.int32)
                self.global_step = tf.get_variable('global_step',
                                                   [],
                                                   tf.int32,
                                                   initializer=init,
                                                   trainable=False)

        # Worker network
        with tf.device(worker_device):
            with tf.variable_scope('local'):
                self.local_network = pi = Policy(env.observation_space.shape,
                                                 env.action_space.n, rng=rng)
                pi.global_step = self.global_step

            self.ac = tf.placeholder(tf.float32, [None, env.action_space.n],
                                     name='ac')
            self.adv = tf.placeholder(tf.float32, [None], name='adv')
            self.r = tf.placeholder(tf.float32, [None], name='r')

            # Action probabilities
            log_prob_tf = tf.nn.log_softmax(pi.logits)
            prob_tf = tf.nn.softmax(pi.logits)

            # Policy loss
            pi_loss = tf.reduce_sum(log_prob_tf * self.ac, [1]) * self.adv
            pi_loss = -tf.reduce_sum(pi_loss)

            # Value loss
            vf_loss = tf.square(pi.vf - self.r)
            vf_loss = 0.5 * tf.reduce_sum(vf_loss)

            # Entropy term
            entropy = -tf.reduce_sum(prob_tf * log_prob_tf)

            # Total loss
            lambda_v = 0.5
            lambda_e = 0.01
            self.loss = pi_loss + lambda_v * vf_loss - lambda_e * entropy

            # Runner
            self.runner = RunnerThread(env, pi)

            # Policy gradients
            grads = tf.gradients(self.loss, pi.variables)

            # Norms
            grad_norm = tf.global_norm(grads)
            var_norm = tf.global_norm(pi.variables)

            # Add to summary
            bs = tf.to_float(tf.shape(pi.x)[0])
            tf.summary.scalar('model/policy_loss', pi_loss / bs)
            tf.summary.scalar('model/value_loss', vf_loss / bs)
            tf.summary.scalar('model/entropy', entropy / bs)
            tf.summary.scalar('model/grad_global_norm', grad_norm)
            tf.summary.scalar('model/var_global_norm', var_norm)
            tf.summary.image('observation', pi.x)
            self.summary_op = tf.summary.merge_all()

            # Copy weights from the parameter server to the local model
            sync_op = [v1.assign(v2)
                       for v1, v2 in zip(pi.variables, self.network.variables)]
            self.sync_op = tf.group(*sync_op)

            # Increment the global step by the number of observations in
            # each batch
            inc_step = self.global_step.assign_add(tf.shape(pi.x)[0])

            # Learning rate
            learning_rate = 1e-4

            # Each worker gets its own optimizer; clipped gradients from the
            # local network are applied to the shared network's variables.
            optimizer = tf.train.AdamOptimizer(learning_rate)
            grads, _ = tf.clip_by_global_norm(grads, 40.0)
            grads_vars = list(zip(grads, self.network.variables))
            train_op = optimizer.apply_gradients(grads_vars)
            self.train_op = tf.group(train_op, inc_step)

            self.summary_writer = None
            self.local_steps = 0
    def start(self, sess, summary_writer):
        self.runner.start_runner(sess, summary_writer)
        self.summary_writer = summary_writer

    def pull_batch_from_queue(self):
        """
        Take a rollout from the queue and extend it with whatever additional
        rollouts are immediately available.
        """
        rollout = self.runner.queue.get(timeout=600.0)
        while not rollout.done:
            try:
                rollout.extend(self.runner.queue.get_nowait())
            except queue.Empty:
                break
        return rollout

    def process(self, sess):
        """
        Run one training step: sync from the shared network, grab a batch of
        experience, and apply the gradients to the shared network.
        """
        # Copy weights from shared to local
        sess.run(self.sync_op)

        # Get batch
        rollout = self.pull_batch_from_queue()
        batch = process_rollout(rollout)

        self.local_steps += 1
        summary = (self.task == 0 and self.local_steps % 10 == 0)

        fetches = [self.train_op, self.global_step]
        if summary:
            fetches += [self.summary_op]

        feed_dict = {
            self.local_network.x: batch.obs,
            self.ac: batch.a,
            self.adv: batch.adv,
            self.r: batch.r,
            self.local_network.state_in: batch.state
        }
        fetched = sess.run(fetches, feed_dict)

        if summary:
            self.summary_writer.add_summary(tf.Summary.FromString(fetched[-1]),
                                            fetched[1])
            self.summary_writer.flush()
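# ---------------------------------------------------------------------------
# Usage sketch (illustration only, not part of the original file). It assumes
# a distributed TF1 setup with one parameter-server and one worker process,
# and a Gym environment (e.g. 'Pong-v0') whose observations match what the
# Policy network in a3c_model expects; the actual training script in this
# repository may differ.
#
#     import gym
#
#     cluster = tf.train.ClusterSpec({'ps': ['localhost:2222'],
#                                     'worker': ['localhost:2223']})
#     # In a separate process: tf.train.Server(cluster, job_name='ps',
#     #                                        task_index=0).join()
#     server = tf.train.Server(cluster, job_name='worker', task_index=0)
#
#     env = gym.make('Pong-v0')
#     trainer = A3C(env, task=0)
#
#     sv = tf.train.Supervisor(is_chief=True,
#                              logdir='/tmp/a3c',
#                              summary_op=None,
#                              global_step=trainer.global_step)
#     with sv.managed_session(server.target) as sess:
#         trainer.start(sess, sv.summary_writer)
#         while not sv.should_stop():
#             trainer.process(sess)
# ---------------------------------------------------------------------------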