Commit b085fa1

Rename leftover definitions in Lite tests (#16309)
1 parent 1bef5c8 commit b085fa1

File tree

21 files changed: +378 -377 lines changed

src/pytorch_lightning/accelerators/tpu.py

Lines changed: 2 additions & 2 deletions

@@ -16,7 +16,7 @@
 import torch
 
 from lightning_fabric.accelerators.tpu import _parse_tpu_devices, _XLA_AVAILABLE
-from lightning_fabric.accelerators.tpu import TPUAccelerator as LiteTPUAccelerator
+from lightning_fabric.accelerators.tpu import TPUAccelerator as FabricTPUAccelerator
 from lightning_fabric.utilities.types import _DEVICE
 from pytorch_lightning.accelerators.accelerator import Accelerator
 
@@ -74,7 +74,7 @@ def auto_device_count() -> int:
 
     @staticmethod
     def is_available() -> bool:
-        return LiteTPUAccelerator.is_available()
+        return FabricTPUAccelerator.is_available()
 
     @classmethod
     def register_accelerators(cls, accelerator_registry: Dict) -> None:
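
The rename above only changes the import alias; the PL accelerator continues to delegate its availability check to the Fabric implementation. A minimal sketch of that delegation pattern (the class body is simplified and is not the full accelerator; it assumes lightning_fabric is importable):

# Sketch only: the PL-side class forwards the availability probe to the aliased Fabric class.
from lightning_fabric.accelerators.tpu import TPUAccelerator as FabricTPUAccelerator


class SketchTPUAccelerator:
    """Simplified stand-in for pytorch_lightning's TPUAccelerator."""

    @staticmethod
    def is_available() -> bool:
        # No TPU probing is re-implemented here; Fabric owns that logic.
        return FabricTPUAccelerator.is_available()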

src/pytorch_lightning/plugins/precision/precision_plugin.py

Lines changed: 2 additions & 2 deletions

@@ -21,13 +21,13 @@
 from torch.optim import Optimizer
 
 import pytorch_lightning as pl
-from lightning_fabric.plugins import Precision as LitePrecision
+from lightning_fabric.plugins import Precision as FabricPrecision
 from lightning_fabric.utilities.types import Steppable
 from pytorch_lightning.core.hooks import CheckpointHooks
 from pytorch_lightning.utilities import grad_norm, GradClipAlgorithmType
 
 
-class PrecisionPlugin(LitePrecision, CheckpointHooks):
+class PrecisionPlugin(FabricPrecision, CheckpointHooks):
     """Base class for all plugins handling the precision-specific parts of the training.
 
     The class attribute precision must be overwritten in child classes. The default value reflects fp32 training.
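
The same alias-on-import pattern appears here: the Fabric base class is renamed at import time so the PL subclass can keep its established public name without a clash. A hedged sketch of the pattern (the real PrecisionPlugin also mixes in CheckpointHooks and defines many more hooks):

# Sketch of the aliased-import inheritance pattern, assuming lightning_fabric is installed.
from lightning_fabric.plugins import Precision as FabricPrecision


class SketchPrecisionPlugin(FabricPrecision):
    """Simplified stand-in: child classes are expected to override the precision attribute."""

    precision = "32"  # illustrative default reflecting fp32 training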

src/pytorch_lightning/strategies/colossalai.py

Lines changed: 1 addition & 1 deletion

@@ -406,7 +406,7 @@ def optimizer_step(
         **kwargs: Any,
     ) -> Any:
         model = model or self.lightning_module
-        # TODO(lite): remove assertion once strategy's optimizer_step typing is fixed
+        # TODO(fabric): remove assertion once strategy's optimizer_step typing is fixed
         assert isinstance(model, pl.LightningModule)
         return self.precision_plugin.optimizer_step(
             optimizer, model=model, optimizer_idx=opt_idx, closure=closure, **kwargs

src/pytorch_lightning/strategies/deepspeed.py

Lines changed: 1 addition & 1 deletion

@@ -608,7 +608,7 @@ def _format_config(self) -> None:
         self._format_precision_config()
 
     def _format_batch_size_and_grad_accum_config(self) -> None:
-        # todo: using lite, we do not support these variables within the config
+        # TODO: Using Fabric, we do not support these variables within the config
         assert isinstance(self.config, dict)
         if self.lightning_module is None:
             return
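
The touched line is only a comment, but the variables it refers to are the batch-size and gradient-accumulation fields of the DeepSpeed config. As a hedged illustration, the keys below are standard DeepSpeed config fields, while the helper itself is hypothetical and not the strategy's actual logic:

# Illustrative only: fill the two DeepSpeed config fields the comment above refers to,
# leaving any user-provided values untouched.
def fill_batch_size_and_grad_accum(config: dict, batch_size: int = 1, accumulate_grad_batches: int = 1) -> dict:
    config.setdefault("train_micro_batch_size_per_gpu", batch_size)
    config.setdefault("gradient_accumulation_steps", accumulate_grad_batches)
    return config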

src/pytorch_lightning/strategies/strategy.py

Lines changed: 4 additions & 4 deletions

@@ -229,7 +229,7 @@ def optimizer_step(
             \**kwargs: Keyword arguments to to ``optimizer.step``
         """
         model = model or self.lightning_module
-        # TODO(lite): remove assertion once strategy's optimizer_step typing is fixed
+        # TODO(fabric): remove assertion once strategy's optimizer_step typing is fixed
         assert isinstance(model, pl.LightningModule)
         return self.precision_plugin.optimizer_step(
             optimizer, model=model, optimizer_idx=opt_idx, closure=closure, **kwargs
@@ -241,19 +241,19 @@ def _setup_model_and_optimizers(self, model: Module, optimizers: List[Optimizer]
         The returned objects are expected to be in the same order they were passed in. The default implementation will
         call :meth:`_setup_model` and :meth:`_setup_optimizer` on the inputs.
         """
-        # TODO: standardize this across all plugins in Lightning and Lite. Related refactor: #7324
+        # TODO: standardize this across all plugins in Lightning and Fabric. Related refactor: #7324
         model = self._setup_model(model)
         optimizers = [self._setup_optimizer(optimizer) for optimizer in optimizers]
         return model, optimizers
 
     def _setup_model(self, model: Module) -> Module:
         """Performs setup for the model, e.g., by wrapping it by another class."""
-        # TODO: standardize this across all plugins in Lightning and Lite. Related refactor: #7324
+        # TODO: standardize this across all plugins in Lightning and Fabric. Related refactor: #7324
         return model
 
     def _setup_optimizer(self, optimizer: Optimizer) -> Optimizer:
         """Performs setup for the optimizer, e.g., by wrapping it by another class."""
-        # TODO: standardize this across all plugins in Lightning and Lite. Related refactor: #7324
+        # TODO: standardize this across all plugins in Lightning and Fabric. Related refactor: #7324
         return optimizer
 
     def batch_to_device(self, batch: Any, device: Optional[torch.device] = None, dataloader_idx: int = 0) -> Any:
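
For context on why `_setup_model` and `_setup_optimizer` exist as identity functions in this base class: distributed strategies override them to wrap the objects. A hedged, self-contained sketch of such an override (the class is a stand-in, not the actual DDPStrategy code):

# Illustrative override of the _setup_model hook from the base Strategy shown above.
from torch.nn import Module
from torch.nn.parallel import DistributedDataParallel


class SketchDistributedStrategy:
    def _setup_model(self, model: Module) -> Module:
        # Wrap the user's module so gradients are synchronized across processes;
        # assumes torch.distributed has already been initialized by the launcher.
        return DistributedDataParallel(module=model)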

src/pytorch_lightning/strategies/utils.py

Lines changed: 1 addition & 1 deletion

@@ -34,7 +34,7 @@ def on_colab_kaggle() -> bool:
 
 
 def _call_register_strategies(registry: _StrategyRegistry, base_module: str) -> None:
-    # TODO(lite): Remove this function once PL strategies inherit from Lite's Strategy base class
+    # TODO(fabric): Remove this function once PL strategies inherit from Fabric's Strategy base class
     module = importlib.import_module(base_module)
     for _, mod in getmembers(module, isclass):
         if issubclass(mod, Strategy) and _is_register_method_overridden(mod, Strategy, "register_strategies"):
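
`_call_register_strategies` relies on plain introspection: import the module, collect its classes, and let every Strategy subclass that overrides the registration hook add itself to the registry. A minimal sketch of that pattern with stand-in names (this is not the PL registry API):

# Sketch of registration-by-introspection: import a module, find subclasses of a base
# class, and call an optional classmethod on each one.
import importlib
from inspect import getmembers, isclass


def register_all(registry: dict, base_module: str, base_class: type) -> None:
    module = importlib.import_module(base_module)
    for _, cls in getmembers(module, isclass):
        if issubclass(cls, base_class) and cls is not base_class:
            register = getattr(cls, "register_strategies", None)
            if callable(register):
                register(registry)  # each subclass adds its own entries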

src/pytorch_lightning/trainer/connectors/accelerator_connector.py

Lines changed: 1 addition & 1 deletion

@@ -684,7 +684,7 @@ def _init_strategy(self) -> None:
         if isinstance(self._strategy_flag, str):
             self.strategy = StrategyRegistry.get(self._strategy_flag)
         elif isinstance(self._strategy_flag, Strategy):
-            # TODO(lite): remove ignore after merging lite and PL strategies
+            # TODO(fabric): remove ignore after merging Fabric and PL strategies
             self.strategy = self._strategy_flag  # type: ignore[assignment]
         else:
             raise RuntimeError(f"{self.strategy} is not valid type: {self.strategy}")
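
`_init_strategy` accepts either a registry name or an already-constructed Strategy instance, which is why the instance branch carries the type-ignore the comment refers to. A hedged sketch of the same string-or-instance resolution with stand-in types:

# Sketch only: _SketchStrategy and the plain dict registry are stand-ins for the real types.
from typing import Dict, Union


class _SketchStrategy:
    pass


def resolve_strategy(flag: Union[str, _SketchStrategy], registry: Dict[str, _SketchStrategy]) -> _SketchStrategy:
    if isinstance(flag, str):
        return registry[flag]  # a name is looked up in the strategy registry
    if isinstance(flag, _SketchStrategy):
        return flag  # an already-constructed strategy is used as-is
    raise RuntimeError(f"{flag} is not a valid strategy type")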

src/pytorch_lightning/trainer/trainer.py

Lines changed: 1 addition & 1 deletion

@@ -1681,7 +1681,7 @@ def accelerator(self) -> Accelerator:
 
     @property
     def strategy(self) -> Strategy:
-        # TODO(lite): remove ignore after merging lite and PL strategies
+        # TODO(fabric): remove ignore after merging Fabric and PL strategies
         return self._accelerator_connector.strategy  # type: ignore[return-value]
 
     @property

src/pytorch_lightning/utilities/imports.py

Lines changed: 1 addition & 1 deletion

@@ -21,7 +21,7 @@
 _PYTHON_GREATER_EQUAL_3_8_0 = (sys.version_info.major, sys.version_info.minor) >= (3, 8)
 _PYTHON_GREATER_EQUAL_3_10_0 = (sys.version_info.major, sys.version_info.minor) >= (3, 10)
 _TORCH_LESSER_EQUAL_1_10_2 = compare_version("torch", operator.le, "1.10.2")
-# duplicated from lite because HPU is patching it below
+# duplicated from fabric because HPU is patching it below
 _TORCH_GREATER_EQUAL_1_13 = compare_version("torch", operator.ge, "1.13.0")
 
 _HABANA_FRAMEWORK_AVAILABLE = package_available("habana_frameworks")
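
The guard constants above are boolean flags computed once at import time from the installed package version. The snippet below is not the actual compare_version implementation, just a hedged sketch of one way to express the same check with standard tooling (assumes torch and packaging are installed):

# Sketch: compute a version-guard flag like _TORCH_GREATER_EQUAL_1_13 at import time.
import operator
from importlib.metadata import version

from packaging.version import Version


def compare_pkg_version(package: str, op, target: str) -> bool:
    return op(Version(version(package)), Version(target))


_TORCH_GREATER_EQUAL_1_13 = compare_pkg_version("torch", operator.ge, "1.13.0")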

tests/tests_fabric/accelerators/test_tpu.py

Lines changed: 2 additions & 2 deletions

@@ -29,11 +29,11 @@ def test_availability():
 @pytest.mark.parametrize(
     "devices,expected",
     [
-        (0, []),  # TODO(lite): This should raise an exception
+        (0, []),  # TODO(fabric): This should raise an exception
         (1, [0]),
         (2, [0, 1]),
         (3, [0, 1, 2]),
-        ("anything-else", "anything-else"),  # TODO(lite): This should raise an exception
+        ("anything-else", "anything-else"),  # TODO(fabric): This should raise an exception
     ],
 )
 def test_get_parallel_devices(devices, expected):
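
The body of test_get_parallel_devices is not part of this diff. For readers unfamiliar with the decorator, pytest.mark.parametrize runs the function once per tuple in the list; a minimal, self-contained sketch with stand-in logic (unrelated to the actual TPU parsing under test):

# Stand-in parametrized test: pytest invokes it once per (devices, expected) pair.
import pytest


@pytest.mark.parametrize(
    "devices,expected",
    [
        (1, [0]),
        (2, [0, 1]),
    ],
)
def test_sketch_parallel_devices(devices, expected):
    assert list(range(devices)) == expected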
