We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent cd8a5d1 commit a5b107d — Copy full SHA for a5b107d
pytorch_optimizer/optimizer/utils.py
@@ -48,7 +48,7 @@ def un_flatten_grad(grads: torch.Tensor, shapes: List[int]) -> List[torch.Tensor
48
un_flatten_grads: List[torch.Tensor] = []
49
for shape in shapes:
50
length = np.prod(shape)
51
- un_flatten_grads.append(grads[idx : idx + length].view(shape).clone())
+ un_flatten_grads.append(grads[idx:idx + length].view(shape).clone()) # fmt: skip
52
idx += length
53
return un_flatten_grads
54
0 commit comments