optimizers.py
# coding: utf-8
import torch
from functools import reduce
from torch.optim import AdamW

class MultiOptimizer:
    # Wraps several named optimizers/schedulers behind a single optimizer-like interface.
    def __init__(self, optimizers=None, schedulers=None):
        # Avoid mutable default arguments: fall back to fresh dicts.
        self.optimizers = optimizers if optimizers is not None else {}
        self.schedulers = schedulers if schedulers is not None else {}
        self.keys = list(self.optimizers.keys())
        # Expose a flat view of all parameter groups, as a single optimizer would.
        self.param_groups = reduce(
            lambda x, y: x + y,
            [v.param_groups for v in self.optimizers.values()],
            [],
        )

    def state_dict(self):
        state_dicts = [(key, self.optimizers[key].state_dict()) for key in self.keys]
        return state_dicts

    def scheduler_state_dict(self):
        state_dicts = [(key, self.schedulers[key].state_dict()) for key in self.keys]
        return state_dicts

    def load_state_dict(self, state_dict):
        for key, val in state_dict:
            try:
                self.optimizers[key].load_state_dict(val)
            except Exception:
                print("Unloaded %s" % key)

    def load_scheduler_state_dict(self, state_dict):
        for key, val in state_dict:
            try:
                self.schedulers[key].load_state_dict(val)
            except Exception:
                print("Unloaded %s" % key)

    def step(self, key=None, scaler=None):
        # Step one optimizer if a key is given, otherwise step all of them.
        keys = [key] if key is not None else self.keys
        _ = [self._step(k, scaler) for k in keys]

    def _step(self, key, scaler=None):
        if scaler is not None:
            # With AMP, the GradScaler owns the optimizer step and scale update.
            scaler.step(self.optimizers[key])
            scaler.update()
        else:
            self.optimizers[key].step()

    def zero_grad(self, key=None):
        if key is not None:
            self.optimizers[key].zero_grad()
        else:
            _ = [self.optimizers[k].zero_grad() for k in self.keys]

    def scheduler(self, *args, key=None):
        if key is not None:
            self.schedulers[key].step(*args)
        else:
            # ExponentialLR has no step_batch(); advance every scheduler with step().
            _ = [self.schedulers[k].step(*args) for k in self.keys]

def define_scheduler(optimizer, params):
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=params['gamma'])
    return scheduler

def build_optimizer(model_dict, lr, type='AdamW'):
    # Build one AdamW optimizer plus an ExponentialLR scheduler per model in model_dict.
    optim = {}
    for key, model in model_dict.items():
        model_parameters = model.parameters()
        # Parameter names are collected here for inspection only; they are not used below.
        parameters_names = []
        parameters_names.append(
            [name_param_pair[0] for name_param_pair in model.named_parameters()]
        )
        if type == 'AdamW':
            optim[key] = AdamW(
                model_parameters,
                lr=lr,
                betas=(0.9, 0.98),
                eps=1e-6,
                weight_decay=0.01,
            )
        else:
            raise ValueError('Unknown optimizer type: %s' % type)

    schedulers = {
        key: torch.optim.lr_scheduler.ExponentialLR(opt, gamma=0.999996)
        for key, opt in optim.items()
    }

    multi_optim = MultiOptimizer(optim, schedulers)
    return multi_optim

class MinLRExponentialLR(torch.optim.lr_scheduler.ExponentialLR):
    # ExponentialLR that never decays the learning rate below min_lr.
    def __init__(self, optimizer, gamma, min_lr=1e-5):
        self.min_lr = min_lr
        super().__init__(optimizer, gamma)

    def get_lr(self):
        lrs = super().get_lr()
        return [max(lr, self.min_lr) for lr in lrs]

def build_single_optimizer(model, lr):
    # Optimize only the parameters that require gradients.
    model_parameters = model.parameters()
    parameters_require_grad = filter(lambda p: p.requires_grad, model_parameters)
    optim = AdamW(
        parameters_require_grad,
        lr=lr,
        betas=(0.9, 0.98),
        eps=1e-6,
        weight_decay=0.01,
    )
    scheduler = MinLRExponentialLR(optim, gamma=0.999996, min_lr=1e-5)
    return optim, scheduler
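

# Minimal usage sketch (not part of the original training code): the module names
# "generator" and "discriminator" below are hypothetical placeholders for whatever
# nn.Module instances the caller actually passes in; the loss is a dummy example.
if __name__ == "__main__":
    import torch.nn as nn

    model_dict = {
        "generator": nn.Linear(8, 8),
        "discriminator": nn.Linear(8, 1),
    }
    multi_optim = build_optimizer(model_dict, lr=1e-4)

    # One illustrative training step: zero all grads, backprop a dummy loss,
    # step every wrapped optimizer, then advance every scheduler by one step.
    x = torch.randn(4, 8)
    loss = model_dict["generator"](x).pow(2).mean()
    multi_optim.zero_grad()
    loss.backward()
    multi_optim.step()
    multi_optim.scheduler()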