
Commit f6518d9

anim fps rnn
1 parent c0995e0 commit f6518d9

6 files changed: 245 additions & 3 deletions

.vscode/settings.json

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
 {
     "python.pythonPath": "C:\\Users\\Federico\\.conda\\envs\\dev\\python.exe",
-    "cSpell.enabled": true
+    "cSpell.enabled": false
 }

analysis_workspace.py

Lines changed: 5 additions & 0 deletions
@@ -23,6 +23,9 @@
 
 config = None
 
+f, ax = plt.subplots()
+
+sim_lengths = []
 for fld in track(flds):
     _config, trajectory, history, cost_history = load_results_from_folder(fld)
 
@@ -38,7 +41,9 @@
     loaded["history"].append(history)
     loaded["cost_history"].append(cost_history)
 
+    sim_lengths.append(len(history))
 
+plt.hist(sim_lengths)
 pi.ok("Data loaded")
 
 # %%
proj/environment/manager.py

Lines changed: 1 addition & 1 deletion
@@ -107,7 +107,7 @@ def _save_video(self):
             animate_from_images(
                 str(self.frames_folder),
                 str(self.datafolder / f"{self.exp_name}.mp4"),
-                10,
+                int(round(1 / self.mouse.dt)),
             )
         except (ValueError, FileNotFoundError):
             print("Failed to generate video from frames.. ")

proj/run/runner.py

Lines changed: 1 addition & 1 deletion
@@ -38,7 +38,7 @@ def render(self, task):
 
 # run
 def run_experiment(
-    environment, controller, model, n_secs=10, frames_folder=None,
+    environment, controller, model, n_secs=0.5, frames_folder=None,
 ):
     """
         Runs an experiment
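With the default duration cut from 10 s to 0.5 s, callers that relied on the old default now get much shorter simulations. A usage sketch, assuming the environment, controller and model objects are constructed elsewhere in proj.run:

# pass n_secs explicitly wherever longer runs are still wanted
run_experiment(environment, controller, model, n_secs=10, frames_folder=None)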

rnn/__init__.py

Whitespace-only changes.

rnn/cartesian_rnn.py

Lines changed: 237 additions & 0 deletions
@@ -0,0 +1,237 @@
"""
Training of a simple RNN that receives trajectory
info (cartesian model) and learns to predict the output produced by the
optimal control cartesian model

Install PyTorch from: https://pytorch.org/get-started/locally/

example code: https://github.com/FedeClaudi/wheel_control_rnn/blob/master/learn_rnn_me.py
"""
# %%
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader

import numpy as np
import matplotlib.pyplot as plt
import random
import pyinspect as pi
from pathlib import Path
from rich.progress import track

pi.install_traceback()

import sys

sys.path.append("./")
from proj import paths
from proj.utils.misc import load_results_from_folder

# %%
# parameters
BATCH_SIZE = 64  # our batch size is fixed for now
N_INPUT = 80  # length of the input vector
N_VECS = 5  # dimensionality of input
N_NEURONS = 100
N_EPOCHS = 25
DATASET_LENGTH = 10000


# Other params
MAX_TRAJ_LEN = 800  # only simulation in which the model reached the goal within N steps are accepted
# all simulations longer than this are rejected
MIN_TRAJ_LEN = 400  # trajectories are truncated to be of this length

# %%
# ---------------------------------------------------------------------------- #
#                                   MAKE DATA                                  #
# ---------------------------------------------------------------------------- #


class ToTensor(object):
    """Convert ndarrays in sample to Tensors."""

    def __call__(self, sample):
        vec, label = sample["vec"], sample["label"]

        return {
            "vec": torch.from_numpy(np.array(vec)).to(dtype=torch.float),
            "label": torch.from_numpy(np.array(label)).to(dtype=torch.float),
        }


class MyData(Dataset):
    def __init__(self, length, veclen, n_vecs, transform=False):
        self.length = length
        self.veclen = veclen
        self.transform = transform
        self.n_vecs = n_vecs

        self.simulation_folders = [
            f for f in Path(paths.db_app).glob("*") if f.is_dir()
        ]

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()

        # load data
        while True:  # keep trying until we get a trajectory with all samples

            # Get a simulation folder and load data
            fld = random.choice(self.simulation_folders)

            (
                config,
                trajectory,
                history,
                cost_history,
            ) = load_results_from_folder(fld)

            # Get controls history
            controls = np.vstack(history[["tau_r", "tau_l"]].values)

            # Get 'inputs' that resulted in the controls
            inputs = np.vstack(
                [trajectory[s, :] for s in history.trajectory_idx]
            )

            if (
                controls.shape[0] > MAX_TRAJ_LEN
                or controls.shape[0] < MIN_TRAJ_LEN
            ):
                # too long or too short
                continue
            else:
                # truncate
                controls = controls[:MIN_TRAJ_LEN, :]
                inputs = inputs[:MIN_TRAJ_LEN, :]
                break  # exit loop

        sample = {"vec": inputs, "label": controls}

        if self.transform:
            sample = self.transform(sample)

        return sample


v, i = MyData(DATASET_LENGTH, N_INPUT, N_VECS, transform=ToTensor())[
    0
].values()

pi.ok("Data model created", f"Input {v.shape}\nLabel {i.shape}")


# %%

# ---------------------------------------------------------------------------- #
#                                 Define model                                 #
# ---------------------------------------------------------------------------- #


class Model(nn.Module):
    def __init__(self, batch_size, n_inputs, n_neurons):
        super(Model, self).__init__()

        self.batch_size = batch_size
        self.n_inputs = n_inputs
        self.n_neurons = n_neurons

        self.rnn = nn.RNN(self.n_inputs, self.n_neurons, nonlinearity="relu")

    def init_hidden(self,):
        # (num_layers, batch_size, n_neurons)
        return torch.zeros(1, self.batch_size, self.n_neurons)

    def forward(self, X):
        # Reshape X: n_steps X batch_size X n_inputs
        X = X.unsqueeze(0)

        # for each time step
        self.hidden = self.rnn(X, self.hidden)

        return [
            s.reshape(self.batch_size, self.n_neurons) for s in self.hidden
        ]


# %%
# ---------------------------------------------------------------------------- #
#                                   TRAINING                                   #
# ---------------------------------------------------------------------------- #
# Get model
model = Model(BATCH_SIZE, N_INPUT, N_NEURONS)

# Get optimizer
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Get loss function
lossfn = nn.MSELoss()


def getloss(activations, target):
    """
    Output is the sum of each units' activation
    and needs to match the target
    """
    return lossfn(activations.sum(axis=1), target)


# Get dataset
mydata = MyData(DATASET_LENGTH, N_INPUT, N_VECS, transform=ToTensor())
dataloader = DataLoader(
    mydata, batch_size=BATCH_SIZE, shuffle=True, num_workers=0, drop_last=True
)


pi.ok("Ready to train")

# %%
# ----------------------------------- Train ---------------------------------- #
loss_record = []
for epoch in track(range(N_EPOCHS), total=N_EPOCHS):
    train_running_loss = 0.0
    model.train()

    # TRAINING ROUND
    for i, data in enumerate(dataloader):
        # zero the parameter gradients
        optimizer.zero_grad()

        # get the inputs
        inputs, labels = data.values()

        # reset hidden states
        model.hidden = model.init_hidden()

        # forward
        output, hidden = model(inputs)  # returns the inner state

        # Compute loss
        loss = getloss(hidden, labels)

        # backward and SGD
        loss.backward()
        optimizer.step()

        train_running_loss += loss.detach().item()

    model.eval()
    if epoch % 10 == 0:
        print(f"Epoch: {epoch} | Loss: {round(train_running_loss, 2)}")
    loss_record.append(train_running_loss)

plt.plot(loss_record)


# %%

# %%
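The new script only plots the running loss; a small follow-up sketch (not part of this commit, file names are assumptions) to persist the trained weights and the loss curve for later inspection:

# save the trained weights and the per-epoch loss next to the script
torch.save(model.state_dict(), "cartesian_rnn.pt")
np.save("cartesian_rnn_loss.npy", np.array(loss_record))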
