train.py
import os
import time

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader

from utils.util import *
from model import *
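
# NOTE: `config`, `myDatasets`, `get_pmodel`, `get_rmodel`, and `WingLoss` are
# assumed to be provided by the star imports above (utils.util / model). The
# config object is expected to expose the attributes used below: data_path,
# batch_size, optimizer, learning_rate, momentum, lr_step, lr_gamma, epochs,
# log_interval, and class_weight.
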
def train_pmodel():
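    """Stage-1 training loop: fits the P-model to the step-1 crops with a
    pointwise regression loss (MSE by default)."""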
    print('training start')
    train_dataset = myDatasets(os.path.join(config.data_path, 'ws/train_data_with_crop_step1.hdf5'))
    train_loader = DataLoader(train_dataset, batch_size=config.batch_size, shuffle=True)
    model, device = get_pmodel()
    if config.optimizer == 'adam':
        optimizer = optim.Adam(model.parameters(), lr=config.learning_rate)
    elif config.optimizer == 'sgd':
        optimizer = optim.SGD(model.parameters(), lr=config.learning_rate, momentum=config.momentum)
    else:
        raise ValueError('unknown optimizer: {}'.format(config.optimizer))
    scheduler = optim.lr_scheduler.StepLR(optimizer, config.lr_step, config.lr_gamma)
    loss_func = nn.MSELoss()
    # loss_func = nn.SmoothL1Loss()
    # loss_func = WingLoss()
    for epoch in range(config.epochs):
        model.train()
        for i, (datas, labels, _) in enumerate(train_loader):
            datas, labels = datas.to(device, dtype=torch.float), labels.to(device, dtype=torch.float)
            preds = model(datas)
            optimizer.zero_grad()
            loss = loss_func(preds, labels)
            loss.backward()
            optimizer.step()
            if i % config.log_interval == 0:
                date = time.strftime("%Y-%m-%d %H:%M:%S")
                print('{}\t epoch={:0>3}, batch={:0>4}, lr={:.5f}, loss={:.5f}'.format(
                    date, epoch, i, optimizer.param_groups[0]['lr'], loss.item()))
        scheduler.step()
        # Save a checkpoint at the end of every epoch.
        save_path = os.path.join(config.data_path, 'ws/models/model_step1_{:0>2}.pt'.format(epoch))
        torch.save(model.state_dict(), save_path)
    print('training finished')

def train_rmodel():
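    """Stage-2 training loop: jointly fits the R-model's landmark-regression
    and face-classification heads on the step-2 crops."""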
    print('training start')
    train_dataset = myDatasets(os.path.join(config.data_path, 'ws/train_data_with_crop_step2.hdf5'))
    train_loader = DataLoader(train_dataset, batch_size=config.batch_size, shuffle=True)
    model, device = get_rmodel()
    if config.optimizer == 'adam':
        optimizer = optim.Adam(model.parameters(), lr=config.learning_rate)
    elif config.optimizer == 'sgd':
        optimizer = optim.SGD(model.parameters(), lr=config.learning_rate, momentum=config.momentum)
    else:
        raise ValueError('unknown optimizer: {}'.format(config.optimizer))
    scheduler = optim.lr_scheduler.StepLR(optimizer, config.lr_step, config.lr_gamma)
    # Per-element landmark loss; reduced manually below so each sample can be weighted.
    loss1 = nn.MSELoss(reduction='none')
    # loss1 = nn.SmoothL1Loss()
    # loss1 = WingLoss()
    # NLLLoss expects log-probabilities from the classification head.
    loss2 = nn.NLLLoss()
    for epoch in range(config.epochs):
        model.train()
        for i, (datas, mark_labels, face_labels) in enumerate(train_loader):
            datas = datas.to(device, dtype=torch.float)
            mark_labels = mark_labels.to(device, dtype=torch.float)
            # The face label doubles as a per-sample weight, so non-face
            # samples contribute no landmark loss.
            mark_weight = face_labels.to(device, dtype=torch.float)
            face_labels = face_labels.to(device, dtype=torch.long)
            mark_preds, face_preds = model(datas)
            optimizer.zero_grad()
            loss1_ = loss1(mark_preds, mark_labels).mean(dim=1) * mark_weight
            loss2_ = loss2(face_preds, face_labels)
            loss = loss1_.mean() + config.class_weight * loss2_
            loss.backward()
            optimizer.step()
            if i % config.log_interval == 0:
                date = time.strftime("%Y-%m-%d %H:%M:%S")
                print('{}\t epoch={:0>2}, batch={:0>4}, lr={:.5f}, loss={:.5f}'.format(
                    date, epoch, i, optimizer.param_groups[0]['lr'], loss.item()))
        scheduler.step()
        # Save a checkpoint at the end of every epoch.
        save_path = os.path.join(config.data_path, 'ws/models/model_step2_{:0>2}.pt'.format(epoch))
        torch.save(model.state_dict(), save_path)
    print('training finished')

if __name__ == '__main__':
    # train_pmodel()
    train_rmodel()
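    # To train stage 1 instead, call train_pmodel() above; checkpoints for
    # both stages are written under <config.data_path>/ws/models/.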