Commit 7ef9688

[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
1 parent 17ae571 commit 7ef9688

File tree

12 files changed: +13 -14 lines changed


src/lightning/fabric/plugins/precision/bitsandbytes.py

Lines changed: 2 additions & 2 deletions
@@ -226,7 +226,7 @@ class _Linear8bitLt(bnb.nn.Linear8bitLt):
     def __init__(self, *args: Any, device: Optional[_DEVICE] = None, threshold: float = 6.0, **kwargs: Any) -> None:
         super().__init__(*args, device=device, threshold=threshold, **kwargs)
         self.weight = cast(bnb.nn.Int8Params, self.weight)  # type: ignore[has-type]
-        self.bias: Optional[torch.nn.Parameter] = self.bias
+        self.bias: Optional[torch.nn.Parameter] = self.bias
         # if the device is CUDA or we are under a CUDA context manager, quantize the weight here, so we don't end up
         # filling the device memory with float32 weights which could lead to OOM
         if torch.tensor(0, device=device).device.type == "cuda":
@@ -310,7 +310,7 @@ class _Linear4bit(bnb.nn.Linear4bit):
     def __init__(self, *args: Any, device: Optional[_DEVICE] = None, **kwargs: Any) -> None:
         super().__init__(*args, device=device, **kwargs)
         self.weight = cast(bnb.nn.Params4bit, self.weight)  # type: ignore[has-type]
-        self.bias: Optional[torch.nn.Parameter] = self.bias
+        self.bias: Optional[torch.nn.Parameter] = self.bias
         # if the device is CUDA or we are under a CUDA context manager, quantize the weight here, so we don't end up
         # filling the device memory with float32 weights which could lead to OOM
         if torch.tensor(0, device=device).device.type == "cuda":
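The check torch.tensor(0, device=device).device.type == "cuda" in these hunks resolves the effective device even when device is None, because allocating a tiny tensor honors any enclosing torch.device(...) context manager, which is exactly what the in-code comment refers to. A minimal standalone sketch of that behavior, assuming PyTorch 2.0+ (where torch.device works as a context manager) and a CUDA-capable machine for the second call:

import torch

def resolves_to_cuda(device=None) -> bool:
    # Allocating a scalar lets PyTorch resolve the *effective* device,
    # including any active `with torch.device(...)` context manager.
    return torch.tensor(0, device=device).device.type == "cuda"

print(resolves_to_cuda())  # False when the default device is CPU
if torch.cuda.is_available():
    with torch.device("cuda"):
        # Inside the context manager the default device is CUDA, so the
        # same call now reports True even though `device` is still None.
        print(resolves_to_cuda())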

src/lightning/fabric/strategies/single_xla.py

Lines changed: 1 addition & 1 deletion
@@ -18,7 +18,7 @@

 from lightning.fabric.accelerators import Accelerator
 from lightning.fabric.accelerators.xla import _XLA_AVAILABLE
-from lightning.fabric.plugins import XLAPrecision, CheckpointIO, Precision
+from lightning.fabric.plugins import CheckpointIO, Precision, XLAPrecision
 from lightning.fabric.plugins.io.xla import XLACheckpointIO
 from lightning.fabric.strategies import _StrategyRegistry
 from lightning.fabric.strategies.single_device import SingleDeviceStrategy
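This hunk only reorders the names inside the from-import alphabetically, the kind of fix an import sorter applies automatically under pre-commit. A small illustration of that behavior using isort's Python API; isort here is an assumption about tooling, and the repository's actual hook configuration may use a different sorter:

import isort  # assumes the isort package is installed

messy = "from lightning.fabric.plugins import XLAPrecision, CheckpointIO, Precision\n"
print(isort.code(messy), end="")
# Expected: from lightning.fabric.plugins import CheckpointIO, Precision, XLAPrecision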

src/lightning/fabric/strategies/strategy.py

Lines changed: 1 addition & 1 deletion
@@ -52,7 +52,7 @@ def __init__(
         self._checkpoint_io: Optional[CheckpointIO] = checkpoint_io
         self._precision: Optional[Precision] = None
         # Call the precision setter for input validation
-        self.precision = precision
+        self.precision = precision
         self._launcher: Optional[_Launcher] = None
         self._backward_sync_control: Optional[_BackwardSyncControl] = None
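As the preceding comment notes, the line self.precision = precision deliberately assigns through the public property rather than self._precision, so the property setter can validate the constructor argument once. A minimal sketch of that pattern with a hypothetical stand-in class, not the actual Strategy implementation:

from typing import Optional

class Precision:  # stand-in for the real precision plugin class
    ...

class MiniStrategy:
    def __init__(self, precision: Optional[Precision] = None) -> None:
        self._precision: Optional[Precision] = None
        # Route the constructor argument through the property so validation runs once.
        self.precision = precision

    @property
    def precision(self) -> Optional[Precision]:
        return self._precision

    @precision.setter
    def precision(self, precision: Optional[Precision]) -> None:
        if precision is not None and not isinstance(precision, Precision):
            raise TypeError(f"Expected a Precision plugin, got {type(precision).__name__!r}")
        self._precision = precision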

src/lightning/fabric/strategies/xla.py

Lines changed: 1 addition & 1 deletion
@@ -23,7 +23,7 @@

 from lightning.fabric.accelerators import Accelerator
 from lightning.fabric.accelerators.xla import _XLA_GREATER_EQUAL_2_1
-from lightning.fabric.plugins import XLAPrecision, CheckpointIO, Precision
+from lightning.fabric.plugins import CheckpointIO, Precision, XLAPrecision
 from lightning.fabric.plugins.environments import XLAEnvironment
 from lightning.fabric.plugins.io.xla import XLACheckpointIO
 from lightning.fabric.strategies import ParallelStrategy, _StrategyRegistry

src/lightning/fabric/strategies/xla_fsdp.py

Lines changed: 1 addition & 2 deletions
@@ -26,9 +26,8 @@

 from lightning.fabric.accelerators import Accelerator
 from lightning.fabric.accelerators.xla import _XLA_AVAILABLE
-from lightning.fabric.plugins import XLAPrecision
+from lightning.fabric.plugins import CheckpointIO, Precision, XLAPrecision
 from lightning.fabric.plugins.environments import XLAEnvironment
-from lightning.fabric.plugins import CheckpointIO, Precision
 from lightning.fabric.plugins.io.xla import XLACheckpointIO
 from lightning.fabric.strategies import ParallelStrategy, _StrategyRegistry
 from lightning.fabric.strategies.fsdp import _apply_filter

src/lightning/pytorch/core/module.py

Lines changed: 1 addition & 1 deletion
@@ -218,7 +218,7 @@ def trainer(self) -> "pl.Trainer":
     def trainer(self, trainer: Optional["pl.Trainer"]) -> None:
         for v in self.children():
             if isinstance(v, LightningModule):
-                v.trainer = trainer
+                v.trainer = trainer
         self._trainer = trainer

     @property
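The setter shown here walks self.children() so that any nested LightningModule picks up the same Trainer reference as its parent, and because the assignment goes through the child's own setter the reference propagates down the whole module tree. A toy sketch of that pattern with a stand-in class rather than the real LightningModule/Trainer types:

from typing import Optional
import torch.nn as nn

class ToyModule(nn.Module):
    """Stand-in module that propagates a trainer reference to its children."""

    def __init__(self) -> None:
        super().__init__()
        self._trainer: Optional[object] = None

    @property
    def trainer(self) -> Optional[object]:
        return self._trainer

    @trainer.setter
    def trainer(self, trainer: Optional[object]) -> None:
        # Assigning to each child invokes this same setter on the child,
        # so the reference propagates recursively down the module tree.
        for child in self.children():
            if isinstance(child, ToyModule):
                child.trainer = trainer
        self._trainer = trainer

root = ToyModule()
root.leaf = ToyModule()          # registered as a submodule by nn.Module
root.trainer = "fake-trainer"
print(root.leaf.trainer)         # fake-trainer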

src/lightning/pytorch/serve/servable_module_validator.py

Lines changed: 1 addition & 1 deletion
@@ -93,7 +93,7 @@ def on_train_start(self, trainer: "pl.Trainer", servable_module: "pl.LightningMo

         # Note: The Trainer needs to be detached from the pl_module before starting the process.
         # This would fail during the deepcopy with DDP.
-        servable_module.trainer = None
+        servable_module.trainer = None

         process = Process(target=self._start_server, args=(servable_module, self.host, self.port, self.optimization))
         process.start()
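The in-code comment gives the rationale: an attached Trainer holds state (for example DDP process-group handles) that cannot survive the deepcopy/pickling involved in handing the module to a new Process, so the reference is cleared first. A standalone sketch of the same failure mode, using a thread lock as a hypothetical stand-in for the unpicklable Trainer state:

import pickle
import threading

class Model:
    def __init__(self) -> None:
        # A lock is a stand-in for any unpicklable object, e.g. an attached
        # Trainer holding live process-group handles.
        self.trainer = threading.Lock()

model = Model()

try:
    pickle.dumps(model)          # fails while the unpicklable reference is attached
except TypeError as err:
    print(f"pickling failed: {err}")

model.trainer = None             # detach before handing the object to another process
pickle.dumps(model)              # now succeeds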

src/lightning/pytorch/strategies/fsdp.py

Lines changed: 1 addition & 1 deletion
@@ -35,7 +35,6 @@

 import lightning.pytorch as pl
 from lightning.fabric.plugins import CheckpointIO, ClusterEnvironment
-from lightning.pytorch.plugins import Precision
 from lightning.fabric.plugins.collectives.torch_collective import default_pg_timeout
 from lightning.fabric.strategies import _StrategyRegistry
 from lightning.fabric.strategies.fsdp import (
@@ -69,6 +68,7 @@
 from lightning.fabric.utilities.seed import reset_seed
 from lightning.fabric.utilities.types import _PATH, ReduceOp
 from lightning.pytorch.core.optimizer import LightningOptimizer
+from lightning.pytorch.plugins import Precision
 from lightning.pytorch.plugins.precision import Precision
 from lightning.pytorch.plugins.precision.fsdp import FSDPPrecision
 from lightning.pytorch.strategies.launchers.subprocess_script import _SubprocessScriptLauncher
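After this move the second hunk leaves two imports of Precision back to back, one from lightning.pytorch.plugins and one from lightning.pytorch.plugins.precision. Assuming the package simply re-exports the submodule's class, both statements bind the same object and the duplicate is redundant rather than conflicting; a quick check one could run, with the is-identity result being an assumption about that re-export:

from lightning.pytorch.plugins import Precision as PackagePrecision
from lightning.pytorch.plugins.precision import Precision as ModulePrecision

# If the package re-exports the submodule's class, both names refer to one object,
# so the later binding simply wins and nothing changes in behavior.
print(PackagePrecision is ModulePrecision)  # expected: True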

src/lightning/pytorch/strategies/single_xla.py

Lines changed: 1 addition & 1 deletion
@@ -19,7 +19,7 @@

 import lightning.pytorch as pl
 from lightning.fabric.accelerators.xla import _XLA_AVAILABLE
-from lightning.fabric.plugins import XLACheckpointIO, CheckpointIO, Precision
+from lightning.fabric.plugins import CheckpointIO, Precision, XLACheckpointIO
 from lightning.fabric.strategies import _StrategyRegistry
 from lightning.fabric.utilities.optimizer import _optimizers_to_device
 from lightning.fabric.utilities.types import _DEVICE

src/lightning/pytorch/strategies/strategy.py

Lines changed: 1 addition & 1 deletion
@@ -57,7 +57,7 @@ def __init__(
         self._checkpoint_io: Optional[CheckpointIO] = checkpoint_io
         self._precision_plugin: Optional[Precision] = None
         # Call the precision setter for input validation
-        self.precision_plugin = precision_plugin
+        self.precision_plugin = precision_plugin
         self._lightning_module: Optional[pl.LightningModule] = None
         self._model: Optional[Module] = None
         self._launcher: Optional[_Launcher] = None
