Commit 964594f

Tested with TF 2.3.1
1 parent 1075a23 commit 964594f

File tree: 1 file changed (+245 -0)

09_Pong-v0_A2C/Pong-v0_A2C_TF2.py (245 additions, 0 deletions)
# Tutorial by www.pylessons.com
# Tutorial written for - Tensorflow 2.3.1

import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import random
import gym
import pylab
import numpy as np
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.layers import Input, Dense, Lambda, Add, Conv2D, Flatten
from tensorflow.keras.optimizers import Adam, RMSprop
from tensorflow.keras import backend as K
import cv2

def OurModel(input_shape, action_space, lr):
    X_input = Input(input_shape)

    #X = Conv2D(32, 8, strides=(4, 4), padding="valid", activation="elu", data_format="channels_first", input_shape=input_shape)(X_input)
    #X = Conv2D(64, 4, strides=(2, 2), padding="valid", activation="elu", data_format="channels_first")(X)
    #X = Conv2D(64, 3, strides=(1, 1), padding="valid", activation="elu", data_format="channels_first")(X)
    X = Flatten(input_shape=input_shape)(X_input)

    X = Dense(512, activation="elu", kernel_initializer='he_uniform')(X)
    #X = Dense(256, activation="elu", kernel_initializer='he_uniform')(X)
    #X = Dense(64, activation="elu", kernel_initializer='he_uniform')(X)

    action = Dense(action_space, activation="softmax", kernel_initializer='he_uniform')(X)
    value = Dense(1, kernel_initializer='he_uniform')(X)

    Actor = Model(inputs=X_input, outputs=action)
    Actor.compile(loss='categorical_crossentropy', optimizer=RMSprop(lr=lr))

    Critic = Model(inputs=X_input, outputs=value)
    Critic.compile(loss='mse', optimizer=RMSprop(lr=lr))

    return Actor, Critic
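
# A minimal usage sketch, kept as a comment (assumes Pong's 6 discrete actions
# and the 4x80x80 stacked-frame state configured in A2CAgent below):
#
#   Actor, Critic = OurModel(input_shape=(4, 80, 80), action_space=6, lr=0.000025)
#   probs = Actor.predict(np.zeros((1, 4, 80, 80)))   # (1, 6) action probabilities
#   value = Critic.predict(np.zeros((1, 4, 80, 80)))  # (1, 1) state-value estimate
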

class A2CAgent:
    # Actor-Critic Main Optimization Algorithm
    def __init__(self, env_name):
        # Initialization
        # Environment and A2C parameters
        self.env_name = env_name
        self.env = gym.make(env_name)
        self.action_size = self.env.action_space.n
        self.EPISODES, self.max_average = 10000, -21.0 # specific for pong
        self.lr = 0.000025

        self.ROWS = 80
        self.COLS = 80
        self.REM_STEP = 4

        # Instantiate episode and plot memory
        self.states, self.actions, self.rewards = [], [], []
        self.scores, self.episodes, self.average = [], [], []

        self.Save_Path = 'Models'
        self.state_size = (self.REM_STEP, self.ROWS, self.COLS)
        self.image_memory = np.zeros(self.state_size)

        if not os.path.exists(self.Save_Path): os.makedirs(self.Save_Path)
        self.path = '{}_A2C_{}'.format(self.env_name, self.lr)
        self.Model_name = os.path.join(self.Save_Path, self.path)

        # Create Actor-Critic network model
        self.Actor, self.Critic = OurModel(input_shape=self.state_size, action_space=self.action_size, lr=self.lr)

    def remember(self, state, action, reward):
        # store one step of the episode: state, one-hot encoded action, and reward
        self.states.append(state)
        action_onehot = np.zeros([self.action_size])
        action_onehot[action] = 1
        self.actions.append(action_onehot)
        self.rewards.append(reward)

    def act(self, state):
        # sample the next action from the Actor's predicted policy distribution
        prediction = self.Actor.predict(state)[0]
        action = np.random.choice(self.action_size, p=prediction)
        return action

    def discount_rewards(self, reward):
        # Compute the gamma-discounted rewards over an episode
        gamma = 0.99    # discount rate
        running_add = 0
        discounted_r = np.zeros_like(reward)
        for i in reversed(range(0, len(reward))):
            if reward[i] != 0: # reset the sum, since this was a game boundary (pong specific!)
                running_add = 0
            running_add = running_add * gamma + reward[i]
            discounted_r[i] = running_add

        discounted_r -= np.mean(discounted_r) # normalizing the result
        discounted_r /= np.std(discounted_r) # divide by standard deviation
        return discounted_r
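
    # A small worked example of discount_rewards (hypothetical values, gamma = 0.99):
    # for the reward sequence [0, 0, 1] the loop produces [0.9801, 0.99, 1.0] before
    # normalization, so the +1/-1 outcome of each Pong rally is propagated back to
    # the frames that led to it; the zero-mean, unit-variance rescaling then keeps
    # the advantage scale stable across episodes.
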
    def replay(self):
        # reshape memory to appropriate shape for training
        states = np.vstack(self.states)
        actions = np.vstack(self.actions)

        # Compute discounted rewards
        discounted_r = self.discount_rewards(self.rewards)

        # Get Critic network predictions
        values = self.Critic.predict(states)[:, 0]
        # Compute advantages
        advantages = discounted_r - values
        # training Actor and Critic networks
        self.Actor.fit(states, actions, sample_weight=advantages, epochs=1, verbose=0)
        self.Critic.fit(states, discounted_r, epochs=1, verbose=0)
        # reset training memory
        self.states, self.actions, self.rewards = [], [], []
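
    # Note on the Actor update above: with one-hot action targets, categorical
    # cross-entropy reduces to -log pi(a_t | s_t), and weighting each sample by
    # its advantage turns the Keras fit() call into the standard A2C policy
    # gradient step; the Critic is regressed toward the discounted returns.
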
    def load(self, Actor_name, Critic_name):
        self.Actor = load_model(Actor_name, compile=False)
        #self.Critic = load_model(Critic_name, compile=False)

    def save(self):
        self.Actor.save(self.Model_name + '_Actor.h5')
        #self.Critic.save(self.Model_name + '_Critic.h5')

    pylab.figure(figsize=(18, 9))
    def PlotModel(self, score, episode):
        self.scores.append(score)
        self.episodes.append(episode)
        self.average.append(sum(self.scores[-50:]) / len(self.scores[-50:]))
        if str(episode)[-2:] == "00": # much faster than episode % 100
            pylab.plot(self.episodes, self.scores, 'b')
            pylab.plot(self.episodes, self.average, 'r')
            pylab.ylabel('Score', fontsize=18)
            pylab.xlabel('Episode', fontsize=18)
            try:
                pylab.savefig(self.path + ".png")
            except OSError:
                pass

        return self.average[-1]

    def imshow(self, image, rem_step=0):
        cv2.imshow(self.Model_name + str(rem_step), image[rem_step, ...])
        if cv2.waitKey(25) & 0xFF == ord("q"):
            cv2.destroyAllWindows()
        return

    def GetImage(self, frame):
        # crop and downsample the frame to 80x80
        frame_cropped = frame[35:195:2, ::2, :]
        if frame_cropped.shape[0] != self.ROWS or frame_cropped.shape[1] != self.COLS:
            # fall back to OpenCV resize if the crop did not give the expected size
            frame_cropped = cv2.resize(frame, (self.COLS, self.ROWS), interpolation=cv2.INTER_CUBIC)

        # convert to grayscale (numpy way, luminance weights)
        frame_rgb = 0.299*frame_cropped[:,:,0] + 0.587*frame_cropped[:,:,1] + 0.114*frame_cropped[:,:,2]

        # convert everything to black and white (agent will train faster)
        frame_rgb[frame_rgb < 100] = 0
        frame_rgb[frame_rgb >= 100] = 255
        # converting to grayscale (OpenCV way)
        #frame_rgb = cv2.cvtColor(frame_cropped, cv2.COLOR_RGB2GRAY)

        # dividing by 255 expresses the pixel values in the 0-1 range
        new_frame = np.array(frame_rgb).astype(np.float32) / 255.0

        # push the stored frames back by one position, similar to how a deque works
        self.image_memory = np.roll(self.image_memory, 1, axis=0)

        # insert the new frame into the freed slot
        self.image_memory[0,:,:] = new_frame

        # show image frame
        #self.imshow(self.image_memory,0)
        #self.imshow(self.image_memory,1)
        #self.imshow(self.image_memory,2)
        #self.imshow(self.image_memory,3)

        return np.expand_dims(self.image_memory, axis=0)
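
    # The returned array has shape (1, REM_STEP, ROWS, COLS) = (1, 4, 80, 80):
    # a batch of one stacked observation, with the most recent binarized frame
    # at index 0, which matches the input_shape the Actor and Critic expect.
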
    def reset(self):
        # start a new episode and fill the frame stack with copies of the first frame
        frame = self.env.reset()
        for i in range(self.REM_STEP):
            state = self.GetImage(frame)
        return state

    def step(self, action):
        next_state, reward, done, info = self.env.step(action)
        next_state = self.GetImage(next_state)
        return next_state, reward, done, info

    def run(self):
        for e in range(self.EPISODES):
            state = self.reset()
            done, score, SAVING = False, 0, ''
            while not done:
                #self.env.render()
                # Actor picks an action
                action = self.act(state)
                # Retrieve new state, reward, and whether the state is terminal
                next_state, reward, done, _ = self.step(action)
                # Memorize (state, action, reward) for training
                self.remember(state, action, reward)
                # Update current state
                state = next_state
                score += reward
                if done:
                    average = self.PlotModel(score, e)
                    # saving best models
                    if average >= self.max_average:
                        self.max_average = average
                        self.save()
                        SAVING = "SAVING"
                    else:
                        SAVING = ""
                    print("episode: {}/{}, score: {}, average: {:.2f} {}".format(e, self.EPISODES, score, average, SAVING))

                    self.replay()
        # close environment when training is finished
        self.env.close()
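
    # Training note: this is the episodic (Monte-Carlo) variant of A2C: the whole
    # episode is collected first and replay() performs a single update on it, so
    # no replay buffer or target network is involved.
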
    def test(self, Actor_name, Critic_name):
        self.load(Actor_name, Critic_name)
        for e in range(100):
            state = self.reset()
            done = False
            score = 0
            while not done:
                action = np.argmax(self.Actor.predict(state))
                state, reward, done, _ = self.step(action)
                score += reward
                if done:
                    print("episode: {}/{}, score: {}".format(e, 100, score))
                    break
        self.env.close()

if __name__ == "__main__":
    #env_name = 'PongDeterministic-v4'
    env_name = 'Pong-v0'
    agent = A2CAgent(env_name)
    agent.run()
    #agent.test('Pong-v0_A2C_2.5e-05_Actor.h5', '')
    #agent.test('PongDeterministic-v4_A2C_1e-05_Actor.h5', '')
