
Commit 9f9c4ac

autofix
1 parent f1ed6a2 commit 9f9c4ac


41 files changed: 54 additions, 103 deletions

pyproject.toml

Lines changed: 0 additions & 3 deletions
@@ -101,8 +101,6 @@ ignore = [
     "S603", # todo: `subprocess` call: check for execution of untrusted input
     "S605", # todo: Starting a process with a shell: seems safe, but may be changed in the future; consider rewriting without `shell`
     "S607", # todo: Starting a process with a partial executable path
-    "RET504", # todo:Unnecessary variable assignment before `return` statement
-    "RET503",
 ]
 "tests/**" = [
     "S101", # Use of `assert` detected
@@ -118,7 +116,6 @@ ignore = [
     "S603", # todo: `subprocess` call: check for execution of untrusted input
     "S605", # todo: Starting a process with a shell: seems safe, but may be changed in the future; consider rewriting without `shell`
     "S607", # todo: Starting a process with a partial executable path
-    "RET504", # todo:Unnecessary variable assignment before `return` statement
     "PT004", # todo: Fixture `tmpdir_unittest_fixture` does not return anything, add leading underscore
     "PT012", # todo: `pytest.raises()` block should contain a single simple statement
     "PT019", # todo: Fixture `_` without value is injected as parameter, use `@pytest.mark.usefixtures` instead

src/lightning/fabric/fabric.py

Lines changed: 1 addition & 2 deletions
@@ -476,8 +476,7 @@ def _setup_dataloader(
         dataloader = self._strategy.process_dataloader(dataloader)
         device = self.device if move_to_device and not isinstance(self._strategy, XLAStrategy) else None
         fabric_dataloader = _FabricDataLoader(dataloader=dataloader, device=device)
-        fabric_dataloader = cast(DataLoader, fabric_dataloader)
-        return fabric_dataloader
+        return cast(DataLoader, fabric_dataloader)
 
     def backward(self, tensor: Tensor, *args: Any, model: Optional[_FabricModule] = None, **kwargs: Any) -> None:
         r"""Replaces ``loss.backward()`` in your training loop. Handles precision automatically for you.

src/lightning/fabric/loggers/tensorboard.py

Lines changed: 1 addition & 2 deletions
@@ -157,8 +157,7 @@ def log_dir(self) -> str:
         if isinstance(self.sub_dir, str):
             log_dir = os.path.join(log_dir, self.sub_dir)
         log_dir = os.path.expandvars(log_dir)
-        log_dir = os.path.expanduser(log_dir)
-        return log_dir
+        return os.path.expanduser(log_dir)
 
     @property
     def sub_dir(self) -> Optional[str]:

src/lightning/fabric/plugins/precision/transformer_engine.py

Lines changed: 1 addition & 2 deletions
@@ -103,8 +103,7 @@ def convert_module(self, module: torch.nn.Module) -> torch.nn.Module:
             )
         elif self.replace_layers in (None, True):
             _convert_layers(module)
-        module = module.to(dtype=self.weights_dtype)
-        return module
+        return module.to(dtype=self.weights_dtype)
 
     @override
     def tensor_init_context(self) -> AbstractContextManager:

src/lightning/fabric/strategies/fsdp.py

Lines changed: 2 additions & 5 deletions
@@ -801,13 +801,12 @@ def _get_sharded_state_dict_context(module: Module) -> Generator[None, None, Non
 
     state_dict_config = ShardedStateDictConfig(offload_to_cpu=True)
     optim_state_dict_config = ShardedOptimStateDictConfig(offload_to_cpu=True)
-    state_dict_type_context = FSDP.state_dict_type(
+    return FSDP.state_dict_type(
         module=module,
         state_dict_type=StateDictType.SHARDED_STATE_DICT,
         state_dict_config=state_dict_config,
         optim_state_dict_config=optim_state_dict_config,
     )
-    return state_dict_type_context  # type: ignore[return-value]
 
 
 def _get_full_state_dict_context(
@@ -819,15 +818,13 @@ def _get_full_state_dict_context(
 
     state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=rank0_only)
     optim_state_dict_config = FullOptimStateDictConfig(offload_to_cpu=True, rank0_only=rank0_only)
-    state_dict_type_context = FSDP.state_dict_type(
+    return FSDP.state_dict_type(
         module=module,
         state_dict_type=StateDictType.FULL_STATE_DICT,
         state_dict_config=state_dict_config,
         optim_state_dict_config=optim_state_dict_config,
     )
 
-    return state_dict_type_context  # type: ignore[return-value]
-
 
 def _is_sharded_checkpoint(path: Path) -> bool:
     """A heuristic check to determine whether the path points to a directory with checkpoint shards."""

src/lightning/fabric/strategies/parallel.py

Lines changed: 1 addition & 2 deletions
@@ -104,8 +104,7 @@ def reduce_boolean_decision(self, decision: bool, all: bool = True) -> bool:
             decision,
             reduce_op=ReduceOp.SUM,  # type: ignore[arg-type]
         )
-        decision = bool(decision == self.world_size) if all else bool(decision)
-        return decision
+        return bool(decision == self.world_size) if all else bool(decision)
 
     @override
     def teardown(self) -> None:

src/lightning/fabric/strategies/xla.py

Lines changed: 1 addition & 2 deletions
@@ -200,8 +200,7 @@ def all_gather(self, tensor: Tensor, group: Optional[Any] = None, sync_grads: bo
         import torch_xla.core.xla_model as xm
 
         tensor = xf.all_gather(tensor) if sync_grads else xm.all_gather(tensor)
-        tensor = tensor.to(original_device)
-        return tensor
+        return tensor.to(original_device)
 
     @override
     def all_reduce(

src/lightning/fabric/strategies/xla_fsdp.py

Lines changed: 1 addition & 2 deletions
@@ -334,8 +334,7 @@ def all_gather(self, tensor: Tensor, group: Optional[Any] = None, sync_grads: bo
         import torch_xla.core.xla_model as xm
 
         tensor = xf.all_gather(tensor) if sync_grads else xm.all_gather(tensor)
-        tensor = tensor.to(original_device)
-        return tensor
+        return tensor.to(original_device)
 
     @override
     def all_reduce(

src/lightning/fabric/utilities/throughput.py

Lines changed: 1 addition & 0 deletions
@@ -632,6 +632,7 @@ def get_available_flops(device: torch.device, dtype: Union[torch.dtype, str]) ->
             rank_zero_warn(f"FLOPs not found for TPU {device_name!r} with {dtype}")
             return None
         return int(_TPU_FLOPS[chip])
+    return None
 
 
 def _plugin_to_compute_dtype(plugin: "Precision") -> torch.dtype:
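This is the one RET503 fix among the files shown: previously the function could fall off the end, presumably for device types not covered by the branches above, returning None implicitly; the autofix makes that path explicit with a function-level `return None`.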

src/lightning/pytorch/callbacks/model_checkpoint.py

Lines changed: 1 addition & 3 deletions
@@ -623,9 +623,7 @@ def check_monitor_top_k(self, trainer: "pl.Trainer", current: Optional[Tensor] =
         should_update_best_and_save = monitor_op(current, self.best_k_models[self.kth_best_model_path])
 
         # If using multiple devices, make sure all processes are unanimous on the decision.
-        should_update_best_and_save = trainer.strategy.reduce_boolean_decision(bool(should_update_best_and_save))
-
-        return should_update_best_and_save
+        return trainer.strategy.reduce_boolean_decision(bool(should_update_best_and_save))
 
     def _format_checkpoint_name(
         self,
