1 parent 161241e commit eea0a94
src/lightning/pytorch/plugins/precision/fsdp.py
@@ -82,7 +82,7 @@ def convert_module(self, module: Module) -> Module:
         return module
 
     @override
-    def clip_grad_by_norm(self, module: Optional[Module], optimizer: Optimizer, clip_val: Union[int, float]) -> None:
+    def clip_grad_by_norm(self, optimizer: Optimizer, clip_val: Union[int, float], module: Optional[Module] = None) -> None:
         # see https://pytorch.org/docs/stable/fsdp.html#torch.distributed.fsdp.FullyShardedDataParallel.clip_grad_norm_
         if module is None:
             return
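
The hunk is truncated after the early return, so the rest of the method body is not shown. As a rough illustration of why the module argument matters here (the linked PyTorch docs require clipping through the FSDP-wrapped module's own clip_grad_norm_, which computes the total norm across parameter shards), the following is a minimal hypothetical sketch, not the plugin's actual implementation; the function name and the non-FSDP fallback branch are assumptions.

# Minimal sketch (assumption, not Lightning's code) of gradient clipping
# with FSDP: sharded parameters need FullyShardedDataParallel.clip_grad_norm_
# so the norm is reduced across ranks, not torch.nn.utils.clip_grad_norm_.
from typing import Optional, Union

from torch import nn
from torch.distributed.fsdp import FullyShardedDataParallel
from torch.optim import Optimizer


def clip_grad_by_norm_sketch(
    optimizer: Optimizer,
    clip_val: Union[int, float],
    module: Optional[nn.Module] = None,
) -> None:
    # Without a handle on the wrapped module there is nothing to clip
    # against the sharded parameters, so bail out early.
    if module is None:
        return
    if isinstance(module, FullyShardedDataParallel):
        # FSDP gathers the per-shard norms and clips collectively.
        module.clip_grad_norm_(max_norm=clip_val)
    else:
        # Assumed fallback for a plain, non-sharded module.
        nn.utils.clip_grad_norm_(module.parameters(), max_norm=clip_val)

With the new signature, clip_val can be passed without a module (in which case the method is a no-op), and module becomes an optional trailing keyword argument instead of the leading positional parameter.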