
Commit ccf301b

update: init alpha_k
1 parent 58b4226 commit ccf301b

File tree

1 file changed (+2, -10 lines)


pytorch_optimizer/optimizer/dadapt.py

Lines changed: 2 additions & 10 deletions
@@ -77,11 +77,7 @@ def reset(self):
 
                 state = self.state[p]
 
-                try:
-                    state['alpha_k'] = torch.full_like(p, fill_value=1e-6)
-                except NotImplementedError:  # there's no fill_() op for SpareTensorCPU
-                    state['alpha_k'] = torch.zeros_like(p)
-
+                state['alpha_k'] = torch.full_like(p, fill_value=1e-6)
                 state['sk'] = torch.zeros_like(p)
                 state['x0'] = torch.clone(p)
                 if p.grad.is_sparse:
@@ -119,11 +115,7 @@ def step(self, closure: CLOSURE = None) -> LOSS:
 
                 state = self.state[p]
                 if 'alpha_k' not in state:
-                    try:
-                        state['alpha_k'] = torch.full_like(p, fill_value=1e-6)
-                    except NotImplementedError:  # there's no fill_() op for SpareTensorCPU
-                        state['alpha_k'] = torch.zeros_like(p)
-
+                    state['alpha_k'] = torch.full_like(p, fill_value=1e-6)
                     state['sk'] = torch.zeros_like(p)
                     state['x0'] = torch.clone(p)
                     if grad.is_sparse:
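In both reset() and step(), the commit drops the try/except around the alpha_k initialization: alpha_k is now always created with torch.full_like(p, fill_value=1e-6), and the NotImplementedError fallback to torch.zeros_like(p) (kept, per the removed comment, for sparse CPU tensors without a fill_() op) is gone. Below is a minimal sketch of the resulting per-parameter state setup for dense parameters; the init_state helper is hypothetical and only mirrors the three state entries visible in the diff, not the actual optimizer class.

import torch

def init_state(p: torch.Tensor) -> dict:
    # Hypothetical helper mirroring the per-parameter state set up in the diff above.
    state = {}
    state['alpha_k'] = torch.full_like(p, fill_value=1e-6)  # no longer wrapped in try/except
    state['sk'] = torch.zeros_like(p)
    state['x0'] = torch.clone(p)
    return state

p = torch.randn(3)
print(init_state(p)['alpha_k'])  # tensor of shape (3,) filled with 1e-06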
