
Commit 8482c28

Remove redundant require_backward_grad_sync=False in sharded plugins (#10065)
1 parent c3614f1 commit 8482c28

File tree

pytorch_lightning/plugins/training_type/sharded.py
pytorch_lightning/plugins/training_type/sharded_spawn.py

2 files changed: +0 -2 lines

pytorch_lightning/plugins/training_type/sharded.py

Lines changed: 0 additions & 1 deletion
@@ -73,7 +73,6 @@ def _setup_models_and_optimizers(

optimizers = self._wrap_optimizers(optimizers)
model = ShardedDataParallel(models[0], sharded_optimizer=optimizers, **self._ddp_kwargs)
- setattr(model, "require_backward_grad_sync", False) # TODO: needed?
return [model], optimizers

def _reinit_optimizers_with_oss(self, optimizers: List[Union[Optimizer, LightningOptimizer]]) -> List["OSS"]:

pytorch_lightning/plugins/training_type/sharded_spawn.py

Lines changed: 0 additions & 1 deletion
@@ -64,7 +64,6 @@ def _setup_models_and_optimizers(

optimizers = self._wrap_optimizers(optimizers)
model = ShardedDataParallel(models[0], sharded_optimizer=optimizers, **self._ddp_kwargs)
- setattr(model, "require_backward_grad_sync", False) # TODO: needed?
return [model], optimizers

def _reinit_optimizers_with_oss(self, optimizers: List[Optimizer]) -> List["OSS"]:
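
For context, here is a minimal sketch of what _setup_models_and_optimizers reduces to in both plugins after this change. It is reconstructed from the context lines of the two hunks above, not copied from the repository: the enclosing class, imports, type hints, and docstrings are omitted, and the comment linking _wrap_optimizers to _reinit_optimizers_with_oss is an assumption based on the method signatures visible in the hunks.

# Sketch only: reconstructed from the diff context above, not the exact file contents.
def _setup_models_and_optimizers(self, models, optimizers):
    # Wrap the plain optimizers; presumably this routes through
    # _reinit_optimizers_with_oss (shown above) to produce fairscale OSS optimizers.
    optimizers = self._wrap_optimizers(optimizers)
    # Wrap the single model in fairscale's ShardedDataParallel. The removed
    # setattr(model, "require_backward_grad_sync", False) line used to sit here
    # and, per the commit title, was redundant.
    model = ShardedDataParallel(models[0], sharded_optimizer=optimizers, **self._ddp_kwargs)
    return [model], optimizers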

0 commit comments
