-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathplot_tabular_q.py
More file actions
92 lines (73 loc) · 2.86 KB
/
plot_tabular_q.py
File metadata and controls
92 lines (73 loc) · 2.86 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
import numpy as np
import safe_grid_gym
import gym
# Define the Q-learning Agent
class QLearningAgent:
    """Tabular Q-learning agent with epsilon-greedy exploration.

    Q-values are stored in a dict keyed by ``str(state)`` so that arbitrary
    (e.g. numpy-array) observations can serve as dictionary keys.
    """

    def __init__(self, action_space, learning_rate=0.8, discount_factor=0.8,
                 exploration_rate=1.0, exploration_decay_rate=0.995,
                 min_exploration_rate=0.0):
        # action_space must expose `.n` (number of discrete actions) and
        # `.sample()` (uniform random action) -- the gym Discrete interface.
        self.action_space = action_space
        self.learning_rate = learning_rate
        self.discount_factor = discount_factor
        self.exploration_rate = exploration_rate
        self.exploration_decay_rate = exploration_decay_rate
        # Optional floor for epsilon; the default 0.0 reproduces the original
        # unbounded decay, a positive value keeps some exploration forever.
        self.min_exploration_rate = min_exploration_rate
        self.q_table = {}

    def get_action(self, state):
        """Return an epsilon-greedy action for `state`."""
        if np.random.rand() < self.exploration_rate:
            return self.action_space.sample()
        return np.argmax(self.get_q_values(state))

    def get_q_values(self, state):
        """Return the Q-value row for `state`, creating a zero row on first access."""
        state_str = str(state)
        if state_str not in self.q_table:
            self.q_table[state_str] = np.zeros(self.action_space.n)
        return self.q_table[state_str]

    def update(self, state, action, reward, next_state, done):
        """Apply one Q-learning backup for the transition (state, action, reward, next_state).

        On episode end (`done`), also decays the exploration rate.
        """
        q_values = self.get_q_values(state)
        current_q_value = q_values[action]
        # BUGFIX: do not bootstrap from a terminal state. A terminal state has
        # value 0 by definition, so the target there is just `reward`; the
        # original code always added `gamma * max Q(s')`, inflating Q-values
        # learned from terminal transitions.
        if done:
            target = reward
        else:
            target = reward + self.discount_factor * np.max(self.get_q_values(next_state))
        # Standard Q-learning update rule: Q += lr * (target - Q).
        q_values[action] = current_q_value + self.learning_rate * (target - current_q_value)
        if done:
            # Decay epsilon once per episode, clipped at the configured floor.
            self.exploration_rate = max(self.min_exploration_rate,
                                        self.exploration_rate * self.exploration_decay_rate)
# Initialize environment and agent.
# Pick exactly one safe-grid-gym environment; the commented lines below are
# the available alternatives (registered by the `safe_grid_gym` import above).
env = gym.make("SafeInterruptibility")
# env = gym.make("DistributionalShift")
# env = gym.make("WhiskyGold")
# env = gym.make("AbsentSupervisor")
# env = gym.make("SideEffectsSokoban")
# env = gym.make("BoatRace")
# env = gym.make("TomatoWatering")
# env = gym.make("FriendFoe")
# env = gym.make("IslandNavigation")
# The agent only needs the env's discrete action space (.n and .sample()).
agent = QLearningAgent(env.action_space)
# Train the Agent: run num_episodes episodes of epsilon-greedy Q-learning,
# recording the undiscounted return of each episode for plotting later.
num_episodes = 500
rewards = []  # per-episode total reward
for episode in range(num_episodes):
    state = env.reset()
    total_reward = 0
    done = False
    while not done:
        action = agent.get_action(state)
        next_state, reward, done, info = env.step(action)
        # Learn from the transition before moving on to the next state.
        agent.update(state, action, reward, next_state, done)
        total_reward += reward
        state = next_state
    rewards.append(total_reward)
    print(f"Episode {episode + 1}: Total Reward = {total_reward}")
# Test the Trained Agent: roll out 10 episodes, rendering each step.
# NOTE(review): get_action is still epsilon-greedy here, so these rollouts may
# take random actions with the agent's residual epsilon -- confirm that a
# purely greedy evaluation is not wanted.
for _ in range(10):
    obs = env.reset()
    done = False
    while not done:
        obs, _, done, _ = env.step(agent.get_action(obs))
        env.render(mode="human")
# Importing matplotlib for plotting
import matplotlib.pyplot as plt

# Plot the per-episode return curve collected during training.
fig, ax = plt.subplots()
ax.plot(rewards)
ax.set_title('Rewards per Episode')
ax.set_xlabel('Episode')
ax.set_ylabel('Total Reward')
plt.show()