1 parent bce69ca commit 0df38f5
src/lightning/pytorch/plugins/precision/fsdp.py
```diff
@@ -84,6 +84,8 @@ def convert_module(self, module: Module) -> Module:
     @override
     def clip_grad_by_norm(self, module: Optional[Module], optimizer: Optimizer, clip_val: Union[int, float]) -> None:
         # see https://pytorch.org/docs/stable/fsdp.html#torch.distributed.fsdp.FullyShardedDataParallel.clip_grad_norm_
+        if module is None or not hasattr(module, "clip_grad_norm_") or not isinstance(module.clip_grad_norm_, Callable):
+            return
         module.clip_grad_norm_(clip_val)
 
     @property
```
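
The added guard makes gradient-norm clipping a silent no-op when the module does not expose FSDP's `clip_grad_norm_` method (or when no module is passed at all), instead of raising an `AttributeError`. Below is a minimal, self-contained sketch of that guard pattern, not Lightning's actual plugin code; the standalone `clip_grad_by_norm` function here is hypothetical and only mirrors the shape of the method in the diff.

```python
# Minimal sketch of the guard added in this commit (assumption: standalone
# illustration, not the FSDPPrecision plugin itself).
from typing import Callable, Optional, Union

import torch
from torch.nn import Module


def clip_grad_by_norm(module: Optional[Module], clip_val: Union[int, float]) -> None:
    # clip_grad_norm_ is only defined on FSDP-wrapped modules, so skip the
    # call instead of raising AttributeError for plain modules or None.
    if module is None or not hasattr(module, "clip_grad_norm_") or not isinstance(module.clip_grad_norm_, Callable):
        return
    module.clip_grad_norm_(clip_val)


if __name__ == "__main__":
    plain = torch.nn.Linear(4, 4)
    clip_grad_by_norm(plain, 1.0)  # no-op: nn.Linear has no clip_grad_norm_
    clip_grad_by_norm(None, 1.0)   # no-op: module is None
```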