1 parent 8d202da commit ed33e31
pytorch_optimizer/optimizer/madgrad.py

```diff
@@ -110,8 +110,7 @@ def step(self, closure: CLOSURE = None) -> LOSS:
         if momentum > 0.0 and grad.is_sparse:
             raise NoSparseGradientError(self.__str__, note='momentum > 0.0')

-        grad_sum_sq = state['grad_sum_sq']
-        s = state['s']
+        grad_sum_sq, s = state['grad_sum_sq'], state['s']

         if weight_decay > 0.0 and not self.decouple_decay:
             if grad.is_sparse:
```
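The change is a behavior-preserving refactor: two separate lookups into the per-parameter state dict are merged into a single tuple assignment. A minimal sketch of the equivalence, using a stand-in dict rather than the real MADGRAD state:

```python
import torch

# Stand-in for the per-parameter state dict (hypothetical values;
# not the real MADGRAD state initialization).
state = {'grad_sum_sq': torch.zeros(3), 's': torch.zeros(3)}

# Before the commit: two statements, one lookup each.
grad_sum_sq = state['grad_sum_sq']
s = state['s']

# After the commit: a single tuple assignment, same two lookups.
grad_sum_sq, s = state['grad_sum_sq'], state['s']

# Both forms bind the same tensor objects, so later in-place updates
# (e.g. grad_sum_sq.addcmul_(...)) still mutate the stored state.
assert grad_sum_sq is state['grad_sum_sq'] and s is state['s']
```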