1 parent 59d93ef commit 3b87924
src/lightning/fabric/plugins/precision/fsdp.py
@@ -87,9 +87,8 @@ def mixed_precision_config(self) -> "TorchMixedPrecision":
 
         if self.precision in ("16-true", "bf16-true"):
             rank_zero_warn(
-                f"FSDPPrecision `{self.precision}` enables mixed-precision execution. "
-                "Model parameters remain in full precision `torch.float32`, while forward and backward passes "
-                f"run with reduced precision `{self._desired_input_dtype}` for speed and memory efficiency."
+                f"FSDP with `{self.precision}` enables computation in lower precision. "
+                "FSDP will always retain a full-precision copy of the model parameters for sharding."
             )
 
         if self.precision in ("16-true", "16-mixed"):
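For context, a minimal sketch of how the reworded warning surfaces, assuming the `FSDPPrecision` plugin (from the file this commit touches) accepts the precision string via its `precision` argument; in a real run the FSDP strategy constructs this plugin for you:

from lightning.fabric.plugins.precision.fsdp import FSDPPrecision

# A "-true" precision requests low-precision computation; per the warning,
# FSDP still retains a full-precision copy of the parameters for sharding.
precision = FSDPPrecision(precision="bf16-true")

# Accessing `mixed_precision_config` (the property patched in this commit)
# emits the rank-zero warning with the updated wording.
config = precision.mixed_precision_config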