1 parent 3b87924 commit 17c92fd
src/lightning/pytorch/plugins/precision/fsdp.py
@@ -97,9 +97,8 @@ def mixed_precision_config(self) -> "TorchMixedPrecision":
 
         if self.precision in ("16-true", "bf16-true"):
             rank_zero_warn(
-                f"FSDPPrecision `{self.precision}` enables mixed-precision execution. "
-                "Model parameters remain in full precision `torch.float32`, while forward and backward passes "
-                f"run with reduced precision `{self._desired_input_dtype}` for speed and memory efficiency."
+                f"FSDP with `{self.precision}` enables computation in lower precision. "
+                "FSDP will always retain a full-precision copy of the model parameters for sharding."
             )
 
         if self.precision in ("16-true", "16-mixed"):
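For context, a minimal sketch of the setup that reaches this warning: selecting true half precision together with the FSDP strategy in the Trainer. This is illustrative only and not part of the commit; it assumes a standard lightning 2.x installation with multiple CUDA GPUs available.

```python
# Illustrative sketch, not part of this commit. Assumes lightning >= 2.0
# and multiple CUDA GPUs.
from lightning.pytorch import Trainer
from lightning.pytorch.demos.boring_classes import BoringModel

# Combining the FSDP strategy with "16-true" (or "bf16-true") routes through
# FSDPPrecision, which emits the warning updated in the diff above:
# computation runs in lower precision, while FSDP retains a full-precision
# copy of the model parameters for sharding.
trainer = Trainer(
    accelerator="gpu",
    devices=2,
    strategy="fsdp",
    precision="16-true",
)
trainer.fit(BoringModel())
```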