
Commit 7790653

Update automatic.py
1 parent 8055717 commit 7790653

File tree: 1 file changed (+4, -2 lines)


src/lightning/pytorch/loops/optimization/automatic.py

Lines changed: 4 additions & 2 deletions
@@ -23,13 +23,14 @@
 from typing_extensions import override
 
 import lightning.pytorch as pl
+from lightning.fabric.utilities.warnings import PossibleUserWarning
 from lightning.pytorch.loops.loop import _Loop
 from lightning.pytorch.loops.optimization.closure import AbstractClosure, OutputResult
 from lightning.pytorch.loops.progress import _OptimizationProgress
 from lightning.pytorch.loops.utilities import _block_parallel_sync_behavior
 from lightning.pytorch.trainer import call
 from lightning.pytorch.utilities.exceptions import MisconfigurationException
-from lightning.pytorch.utilities.rank_zero import WarningCache
+from lightning.pytorch.utilities.rank_zero import WarningCache, rank_zero_warn
 from lightning.pytorch.utilities.types import STEP_OUTPUT
 
 
@@ -320,10 +321,11 @@ def _training_step(self, kwargs: OrderedDict) -> ClosureResult:
         self.trainer.strategy.post_training_step()  # unused hook - call anyway for backward compatibility
 
         if training_step_output is None and trainer.world_size > 1:
-            raise RuntimeError(
+            rank_zero_warn(
                 "Skipping the `training_step` by returning None in distributed training is not supported."
                 " It is recommended that you rewrite your training logic to avoid having to skip the step in the first"
                 " place.",
+                category=PossibleUserWarning,
             )
 
         return self.output_result_cls.from_training_step_output(training_step_output, trainer.accumulate_grad_batches)
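
In effect, a LightningModule that returns None from training_step to skip a batch now triggers a rank-zero PossibleUserWarning in multi-device runs instead of a RuntimeError. Below is a minimal sketch of such a module; the SkipBatchModel class, its layer shape, and the skip condition are illustrative assumptions and are not part of this commit.

# Illustrative sketch only (not from the commit): a LightningModule that skips a
# batch by returning None from training_step. After this change, doing so with
# world_size > 1 emits a PossibleUserWarning via rank_zero_warn rather than raising.
import torch
from torch import nn
import lightning.pytorch as pl


class SkipBatchModel(pl.LightningModule):  # hypothetical example module
    def __init__(self):
        super().__init__()
        self.layer = nn.Linear(32, 1)

    def training_step(self, batch, batch_idx):
        x, y = batch
        loss = nn.functional.mse_loss(self.layer(x), y)
        if not torch.isfinite(loss):
            return None  # skip this batch; now a warning (not an error) in distributed runs
        return loss

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.01)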
