1 parent 48342a5 commit b3896a5
pytorch_optimizer/adahessian.py
@@ -187,9 +187,7 @@ def step(self, closure: CLOSURE = None) -> LOSS:
 
                 if len(state) == 1:
                     state['step'] = 0
-                    # Exponential moving average of gradient values
                     state['exp_avg'] = torch.zeros_like(p.data)
-                    # Exponential moving average of Hessian diagonal square values
                     state['exp_hessian_diag_sq'] = torch.zeros_like(p.data)
 
                 exp_avg, exp_hessian_diag_sq = (
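
For context, the two buffers initialized in this hunk are the optimizer's first- and second-moment state: an exponential moving average of the gradients and an exponential moving average of the squared Hessian-diagonal estimate. Below is a minimal sketch of how such state is typically consumed by an AdaHessian-style update; it is not this repository's actual step() code, and the function name, the hyperparameter defaults (lr, betas, eps), and the Hutchinson diagonal estimate `hut_trace` are assumptions made for illustration.

```python
# Minimal sketch (not the repository's actual step() code) of an AdaHessian-style
# update using the two state buffers initialized in the hunk above.
# `hut_trace` is assumed to be a Hutchinson estimate of the Hessian diagonal.
import torch


def adahessian_style_update(p, grad, hut_trace, state,
                            lr=0.1, betas=(0.9, 0.999), eps=1e-8):
    beta1, beta2 = betas
    exp_avg = state['exp_avg']
    exp_hessian_diag_sq = state['exp_hessian_diag_sq']
    state['step'] += 1

    # Exponential moving average of gradient values.
    exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)
    # Exponential moving average of Hessian diagonal square values.
    exp_hessian_diag_sq.mul_(beta2).addcmul_(hut_trace, hut_trace, value=1.0 - beta2)

    bias_correction1 = 1.0 - beta1 ** state['step']
    bias_correction2 = 1.0 - beta2 ** state['step']

    # Bias-corrected second-moment denominator, with eps for numerical stability.
    denom = (exp_hessian_diag_sq / bias_correction2).sqrt_().add_(eps)

    # Parameter update: p <- p - lr * m_hat / (sqrt(v_hat) + eps).
    p.data.addcdiv_(exp_avg, denom, value=-lr / bias_correction1)
```

The squared Hutchinson trace plays the role that the squared gradient plays in Adam, which is why both buffers are allocated with the same shape as p.data.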