-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathutils.py
More file actions
75 lines (59 loc) · 2.22 KB
/
utils.py
File metadata and controls
75 lines (59 loc) · 2.22 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
import torch
import numpy as np
import os
from models.NGram import NGram
def train_one_epoch(model,criterion,train_loader,optimizer,device,epoch):
    """Run one training epoch over ``train_loader``.

    Args:
        model: torch module to train (special-cased when it is an NGram).
        criterion: loss function applied to flattened logits vs. labels.
        train_loader: iterable of (inputs, labels) batches.
        optimizer: optimizer stepping the model's parameters.
        device: device to move batches to.
        epoch: epoch index, used only for progress printing.

    Returns:
        Mean training loss over all batches of this epoch (0.0 if the
        loader is empty). Previously this function returned None; the
        return value is additive and backward compatible.
    """
    running_loss = 0.
    total_loss = 0.
    num_batches = 0
    model.train(True)
    # Loop-invariant: how labels are reshaped depends only on the model type.
    is_ngram = isinstance(model, NGram)
    for i, data in enumerate(train_loader):
        inputs, labels = data
        inputs = inputs.to(device)
        labels = labels.to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        if is_ngram:
            # NGram predicts a single next token: keep only the last label.
            labels = labels[:, -1]
        else:
            # Sequence models: flatten labels to align with flattened logits.
            labels = labels.view(-1)
        loss = criterion(outputs.view(-1, outputs.size(-1)), labels)
        loss.backward()
        optimizer.step()
        batch_loss = loss.item()
        running_loss += batch_loss
        total_loss += batch_loss
        num_batches += 1
        if i % 500 == 499:
            last_loss = running_loss/500
            print(f'Epoch {epoch} batch {i+1} loss: {last_loss}')
            running_loss = 0.
    # Guard against an empty loader so we never divide by zero.
    return total_loss / max(num_batches, 1)
@torch.no_grad()
def evaluate(model,data_loader,device):
    """Evaluate ``model`` on ``data_loader`` with cross-entropy loss.

    Runs under ``torch.no_grad()``; prints a running loss every 300
    batches and the overall mean at the end.

    Args:
        model: torch module to evaluate (special-cased for NGram).
        data_loader: iterable of (inputs, labels) batches.
        device: device to move batches to.

    Returns:
        Mean validation loss over all batches (0.0 for an empty loader).
        Previously the mean was only printed; returning it is additive
        and backward compatible.
    """
    model.eval()
    criterion = torch.nn.CrossEntropyLoss()
    running_loss = 0.
    cumulative_loss=[]
    # Loop-invariant: label reshaping depends only on the model type.
    is_ngram = isinstance(model, NGram)
    for i, data in enumerate(data_loader):
        inputs, labels = data
        inputs = inputs.to(device)
        labels = labels.to(device)
        outputs = model(inputs)
        if is_ngram:
            # NGram predicts a single next token: keep only the last label.
            labels = labels[:, -1]
        else:
            labels = labels.view(-1)
        loss = criterion(outputs.view(-1, outputs.size(-1)), labels)
        running_loss += loss.item()
        cumulative_loss.append(loss.item())
        if i % 300 == 299:
            last_loss = running_loss/300
            print(f'Validation loss in batch {i+1} loss: {last_loss}')
            running_loss = 0.
    # Guard: np.mean([]) emits a warning and yields NaN for an empty loader.
    mean_loss = float(np.mean(cumulative_loss)) if cumulative_loss else 0.0
    print(f'Validation loss total: {mean_loss}')
    return mean_loss
def load_model_from_path(model,name):
    """Load weights into ``model`` from a checkpoint next to this module.

    Accepts both the wrapped format written by ``save_model_to_path``
    ({'model_state_dict': ...}) and a raw state_dict (the format hinted
    at by the previously commented-out loader), so older checkpoints
    still load. Failures are reported but not raised, preserving the
    original best-effort behavior.

    Args:
        model: torch module whose parameters are overwritten in place.
        name: checkpoint filename, resolved relative to this file's directory.
    """
    file_path = os.path.join(os.path.dirname(__file__), name)
    try:
        checkpoint = torch.load(file_path, map_location='cpu')
        # A raw state_dict is itself a dict without the wrapper key, so
        # .get with the checkpoint as fallback handles both formats.
        if isinstance(checkpoint, dict):
            state_dict = checkpoint.get('model_state_dict', checkpoint)
        else:
            state_dict = checkpoint
        model.load_state_dict(state_dict)
    except (OSError, RuntimeError, KeyError, ValueError) as e:
        # Narrowed from a bare except (which also swallowed KeyboardInterrupt)
        # and now reports which file failed and why.
        print(f"Unsuccessfully loading model from path {file_path}: {e}")
def save_model_to_path(model, name):
    """Write ``model``'s state_dict to ``name`` in this module's directory.

    The checkpoint is stored wrapped as {'model_state_dict': ...}, the
    format expected by ``load_model_from_path``.
    """
    destination = os.path.join(os.path.dirname(__file__), name)
    checkpoint = {'model_state_dict': model.state_dict()}
    torch.save(checkpoint, destination)