1 parent ed2fe05 commit 2f62a0a
src/lightning/pytorch/plugins/precision/fsdp.py
@@ -84,7 +84,7 @@ def convert_module(self, module: Module) -> Module:
     @override
     def clip_grad_by_norm(self, module: Optional[Module], optimizer: Optimizer, clip_val: Union[int, float]) -> None:
         # see https://pytorch.org/docs/stable/fsdp.html#torch.distributed.fsdp.FullyShardedDataParallel.clip_grad_norm_
-        if module is None or not hasattr(module, "clip_grad_norm_") or not isinstance(module.clip_grad_norm_, callable):
+        if module is None:
             return
         module.clip_grad_norm_(clip_val)
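
For context, here is a hedged, standalone sketch (not part of the Lightning source) of what the patched hook now does, and of why the removed guard could not run as written: `isinstance(x, callable)` raises a `TypeError`, because `callable` is a built-in function, not a type. The function name below is hypothetical; only the parameter list mirrors the diff.

```python
# Hypothetical sketch, not Lightning code: mirrors the logic of the patched hook.
from typing import Optional, Union

from torch.nn import Module
from torch.optim import Optimizer


def clip_grad_by_norm_sketch(module: Optional[Module], optimizer: Optimizer, clip_val: Union[int, float]) -> None:
    # `optimizer` is unused here, matching the hook in the diff above.
    #
    # The removed guard, `isinstance(module.clip_grad_norm_, callable)`, would
    # raise TypeError at runtime ("isinstance() arg 2 must be a type"). A
    # working equivalent of the intended check would have been:
    #     if module is None or not callable(getattr(module, "clip_grad_norm_", None)):
    #         return
    # The commit keeps only the None check, presumably because an FSDP-wrapped
    # module always exposes clip_grad_norm_.
    if module is None:
        return
    # Delegate to FSDP's sharding-aware gradient clipping.
    module.clip_grad_norm_(clip_val)
```

Called with `module=None` this is a no-op; with a `FullyShardedDataParallel` instance it delegates to FSDP's own `clip_grad_norm_`, which correctly computes the norm over gradients sharded across ranks (see the linked PyTorch documentation).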