Commit 939d56c

Drop PyTorch 1.7 support (#12432)
1 parent ef8f49a commit 939d56c

60 files changed (+150 / -273 lines)


CHANGELOG.md

Lines changed: 1 addition & 1 deletion
@@ -175,7 +175,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 ### Changed
 
-- Drop PyTorch 1.7 support ([#12191](https://github.com/PyTorchLightning/pytorch-lightning/pull/12191))
+- Drop PyTorch 1.7 support ([#12191](https://github.com/PyTorchLightning/pytorch-lightning/pull/12191)), ([#12432](https://github.com/PyTorchLightning/pytorch-lightning/pull/12432))
 
 
 - Make `benchmark` flag optional and set its value based on the deterministic flag ([#11944](https://github.com/PyTorchLightning/pytorch-lightning/pull/11944))

docs/source/advanced/fault_tolerant_training.rst

Lines changed: 1 addition & 1 deletion
@@ -13,7 +13,7 @@ This would make benchmarking non-reproducible as optimization has been interrupt
 With Fault Tolerant Training, when ``Trainer.fit()`` fails in the middle of an epoch during training or validation,
 Lightning will restart exactly where it failed, and everything will be restored.
 
-Fault Tolerance requires PyTorch 1.7 or higher and can be enabled as follows:
+Fault tolerance can be enabled as follows:
 
 .. code-block:: bash
 

pytorch_lightning/accelerators/gpu.py

Lines changed: 2 additions & 5 deletions
@@ -23,7 +23,6 @@
 from pytorch_lightning.accelerators.accelerator import Accelerator
 from pytorch_lightning.utilities import device_parser
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
-from pytorch_lightning.utilities.imports import _TORCH_GREATER_EQUAL_1_8
 from pytorch_lightning.utilities.types import _DEVICE
 
 _log = logging.getLogger(__name__)
@@ -70,9 +69,7 @@ def get_device_stats(self, device: _DEVICE) -> Dict[str, Any]:
             FileNotFoundError:
                 If nvidia-smi installation not found
         """
-        if _TORCH_GREATER_EQUAL_1_8:
-            return torch.cuda.memory_stats(device)
-        return get_nvidia_gpu_stats(device)
+        return torch.cuda.memory_stats(device)
 
     @staticmethod
     def parse_devices(devices: Union[int, str, List[int]]) -> Optional[List[int]]:
@@ -102,7 +99,7 @@ def register_accelerators(cls, accelerator_registry: Dict) -> None:
         )
 
 
-def get_nvidia_gpu_stats(device: _DEVICE) -> Dict[str, float]:
+def get_nvidia_gpu_stats(device: _DEVICE) -> Dict[str, float]:  # pragma: no-cover
     """Get GPU stats including memory, fan speed, and temperature from nvidia-smi.
 
     Args:
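
For reference, the retained code path delegates entirely to `torch.cuda.memory_stats`, which exists on every PyTorch version Lightning still supports. A minimal sketch of calling it directly (the device index and stat key below are illustrative, not taken from the commit):

```python
import torch

# torch.cuda.memory_stats returns a dict of caching-allocator counters for one device.
if torch.cuda.is_available():
    stats = torch.cuda.memory_stats(torch.device("cuda:0"))
    # e.g. bytes currently held by the allocator on that device
    print(stats.get("allocated_bytes.all.current", 0))
```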

pytorch_lightning/callbacks/quantization.py

Lines changed: 1 addition & 8 deletions
@@ -22,14 +22,7 @@
 
 import torch
 from torch import Tensor
-
-from pytorch_lightning.utilities.imports import _TORCH_GREATER_EQUAL_1_8
-
-if _TORCH_GREATER_EQUAL_1_8:
-    from torch.quantization import FakeQuantizeBase
-else:
-    # For torch 1.7.
-    from torch.quantization import FakeQuantize as FakeQuantizeBase
+from torch.quantization import FakeQuantizeBase
 
 import pytorch_lightning as pl
 from pytorch_lightning.callbacks.base import Callback
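
With PyTorch 1.8+ guaranteed, `FakeQuantizeBase` can be imported unconditionally; the concrete `FakeQuantize` class encountered in practice derives from it. A hedged sketch of that relationship (the instantiation below is illustrative, not from the commit):

```python
from torch.quantization import FakeQuantize, FakeQuantizeBase

# On PyTorch >= 1.8, FakeQuantize (observer-based fake quantization) subclasses
# FakeQuantizeBase, so isinstance checks against the base class keep working.
fq = FakeQuantize()
assert isinstance(fq, FakeQuantizeBase)
```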

pytorch_lightning/core/lightning.py

Lines changed: 1 addition & 1 deletion
@@ -66,7 +66,7 @@ class LightningModule(
     CheckpointHooks,
     Module,
 ):
-    # Below is for property support of JIT in PyTorch 1.7
+    # Below is for property support of JIT
     # since none of these are important when using JIT, we are going to ignore them.
     __jit_unused_properties__ = (
         [
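
The comment refers to TorchScript's `__jit_unused_properties__` hook: property names listed there are skipped when the module is scripted. A hedged, stand-alone sketch of the mechanism (the example module and property are made up for illustration):

```python
import torch
from torch import nn


class Example(nn.Module):
    # Property names listed here are ignored by torch.jit.script
    # instead of being compiled into the ScriptModule.
    __jit_unused_properties__ = ["human_name"]

    @property
    def human_name(self) -> str:
        return type(self).__name__  # Python-only logic we do not want scripted

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x * 2


scripted = torch.jit.script(Example())
print(scripted(torch.ones(3)))
```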

pytorch_lightning/profiler/xla.py

Lines changed: 2 additions & 4 deletions
@@ -15,10 +15,10 @@
 from typing import Dict
 
 from pytorch_lightning.profiler.profiler import Profiler
-from pytorch_lightning.utilities import _TORCH_GREATER_EQUAL_1_8, _TPU_AVAILABLE
+from pytorch_lightning.utilities import _TPU_AVAILABLE
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 
-if _TPU_AVAILABLE and _TORCH_GREATER_EQUAL_1_8:
+if _TPU_AVAILABLE:
     import torch_xla.debug.profiler as xp
 
 log = logging.getLogger(__name__)
@@ -45,8 +45,6 @@ def __init__(self, port: int = 9012) -> None:
         """
         if not _TPU_AVAILABLE:
             raise MisconfigurationException("`XLAProfiler` is only supported on TPUs")
-        if not _TORCH_GREATER_EQUAL_1_8:
-            raise MisconfigurationException("`XLAProfiler` is only supported with `torch-xla >= 1.8`")
         super().__init__(dirpath=None, filename=None)
         self.port = port
         self._recording_map: Dict = {}
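
Usage is unchanged apart from the dropped version check. A hedged sketch of wiring the profiler into a `Trainer` (the accelerator/device arguments are illustrative and require a TPU environment with `torch_xla` installed):

```python
from pytorch_lightning import Trainer
from pytorch_lightning.profiler import XLAProfiler

# XLAProfiler now only requires a TPU environment; the torch >= 1.8 guard is gone.
trainer = Trainer(accelerator="tpu", devices=8, profiler=XLAProfiler(port=9012))
```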

pytorch_lightning/strategies/ddp.py

Lines changed: 8 additions & 8 deletions
@@ -42,12 +42,16 @@
     get_default_process_group_backend_for_device,
 )
 from pytorch_lightning.utilities.distributed import group as _group
-from pytorch_lightning.utilities.distributed import init_dist_connection, ReduceOp, sync_ddp_if_available
+from pytorch_lightning.utilities.distributed import (
+    init_dist_connection,
+    ReduceOp,
+    register_ddp_comm_hook,
+    sync_ddp_if_available,
+)
 from pytorch_lightning.utilities.exceptions import DeadlockDetectedException
 from pytorch_lightning.utilities.imports import (
     _FAIRSCALE_AVAILABLE,
     _IS_WINDOWS,
-    _TORCH_GREATER_EQUAL_1_8,
     _TORCH_GREATER_EQUAL_1_9,
     _TORCH_GREATER_EQUAL_1_10,
     _TORCH_GREATER_EQUAL_1_11,
@@ -58,8 +62,6 @@
 
 if _FAIRSCALE_AVAILABLE:
     from fairscale.optim import OSS
-if _TORCH_GREATER_EQUAL_1_8:
-    from pytorch_lightning.utilities.distributed import register_ddp_comm_hook
 if _TORCH_GREATER_EQUAL_1_10:
     from torch.distributed.algorithms.model_averaging.averagers import ModelAverager
 
@@ -213,9 +215,7 @@ def _register_ddp_hooks(self) -> None:
         log.detail(f"{self.__class__.__name__}: registering ddp hooks")
         # In 1.8, DDP communication hooks only work with NCCL backend and SPSD (single process single device) mode
         # Since 1.9, DDP communication hooks can work on all backends.
-        if _TORCH_GREATER_EQUAL_1_9 or (
-            _TORCH_GREATER_EQUAL_1_8 and self.root_device.type == "cuda" and self._is_single_process_single_device
-        ):
+        if _TORCH_GREATER_EQUAL_1_9 or (self.root_device.type == "cuda" and self._is_single_process_single_device):
             register_ddp_comm_hook(
                 model=self.model,
                 ddp_comm_state=self._ddp_comm_state,
@@ -302,7 +302,7 @@ def determine_ddp_device_ids(self):
     def barrier(self, *args, **kwargs) -> None:
         if not distributed_available():
             return
-        if _TORCH_GREATER_EQUAL_1_8 and torch.distributed.get_backend() == "nccl":
+        if torch.distributed.get_backend() == "nccl":
            torch.distributed.barrier(device_ids=self.determine_ddp_device_ids())
         else:
             torch.distributed.barrier()
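
The barrier change simply drops the version guard around the NCCL-specific call. A hedged sketch of the underlying PyTorch pattern, assuming an already-initialized process group (e.g. launched via `torchrun`):

```python
import torch.distributed as dist


def barrier_on_own_gpu(local_rank: int) -> None:
    """Synchronize all ranks; with NCCL, pin the barrier to this rank's GPU."""
    if not dist.is_available() or not dist.is_initialized():
        return
    if dist.get_backend() == "nccl":
        # device_ids is only supported (and only meaningful) for the NCCL backend
        dist.barrier(device_ids=[local_rank])
    else:
        dist.barrier()
```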

pytorch_lightning/strategies/ddp_spawn.py

Lines changed: 9 additions & 7 deletions
@@ -35,15 +35,17 @@
     get_default_process_group_backend_for_device,
 )
 from pytorch_lightning.utilities.distributed import group as _group
-from pytorch_lightning.utilities.distributed import init_dist_connection, ReduceOp, sync_ddp_if_available
-from pytorch_lightning.utilities.imports import _TORCH_GREATER_EQUAL_1_8, _TORCH_GREATER_EQUAL_1_11
+from pytorch_lightning.utilities.distributed import (
+    init_dist_connection,
+    ReduceOp,
+    register_ddp_comm_hook,
+    sync_ddp_if_available,
+)
+from pytorch_lightning.utilities.imports import _TORCH_GREATER_EQUAL_1_11
 from pytorch_lightning.utilities.rank_zero import rank_zero_info, rank_zero_only
 from pytorch_lightning.utilities.seed import reset_seed
 from pytorch_lightning.utilities.types import STEP_OUTPUT
 
-if _TORCH_GREATER_EQUAL_1_8:
-    from pytorch_lightning.utilities.distributed import register_ddp_comm_hook
-
 log = logging.getLogger(__name__)
 
 
@@ -171,7 +173,7 @@ def pre_configure_ddp(self):
     def _register_ddp_hooks(self) -> None:
         # currently, DDP communication hooks only work with NCCL backend and SPSD (single process single device) mode
         # https://github.com/pytorch/pytorch/blob/v1.8.0/torch/nn/parallel/distributed.py#L1080-L1084
-        if _TORCH_GREATER_EQUAL_1_8 and self.root_device.type == "cuda" and self._is_single_process_single_device:
+        if self.root_device.type == "cuda" and self._is_single_process_single_device:
             register_ddp_comm_hook(
                 model=self.model,
                 ddp_comm_state=self._ddp_comm_state,
@@ -192,7 +194,7 @@ def determine_ddp_device_ids(self):
     def barrier(self, *args, **kwargs) -> None:
         if not distributed_available():
             return
-        if _TORCH_GREATER_EQUAL_1_8 and torch.distributed.get_backend() == "nccl":
+        if torch.distributed.get_backend() == "nccl":
            torch.distributed.barrier(device_ids=self.determine_ddp_device_ids())
         else:
             torch.distributed.barrier()
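
`register_ddp_comm_hook` is now imported unconditionally in both DDP strategies. For context, a hedged sketch of the raw PyTorch mechanism it wraps (the fp16 compression hook below is one built-in choice, not necessarily what the strategies register by default; it assumes an initialized NCCL group and a CUDA model wrapped in `DistributedDataParallel`):

```python
from torch.distributed.algorithms.ddp_comm_hooks import default_hooks
from torch.nn.parallel import DistributedDataParallel


def attach_fp16_compression(ddp_model: DistributedDataParallel) -> None:
    # Gradients are cast to fp16 before the all-reduce and cast back afterwards,
    # trading a little precision for reduced communication volume.
    ddp_model.register_comm_hook(state=None, hook=default_hooks.fp16_compress_hook)
```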

pytorch_lightning/trainer/connectors/accelerator_connector.py

Lines changed: 2 additions & 11 deletions
@@ -79,13 +79,7 @@
     rank_zero_warn,
 )
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
-from pytorch_lightning.utilities.imports import (
-    _HOROVOD_AVAILABLE,
-    _HPU_AVAILABLE,
-    _IPU_AVAILABLE,
-    _TORCH_GREATER_EQUAL_1_8,
-    _TPU_AVAILABLE,
-)
+from pytorch_lightning.utilities.imports import _HOROVOD_AVAILABLE, _HPU_AVAILABLE, _IPU_AVAILABLE, _TPU_AVAILABLE
 
 log = logging.getLogger(__name__)
 
@@ -217,10 +211,7 @@ def __init__(
 
     def _init_deterministic(self, deterministic: bool) -> None:
         self.deterministic = deterministic
-        if _TORCH_GREATER_EQUAL_1_8:
-            torch.use_deterministic_algorithms(deterministic)
-        else:
-            torch.set_deterministic(deterministic)
+        torch.use_deterministic_algorithms(deterministic)
         if deterministic:
             # fixing non-deterministic part of horovod
             # https://github.com/PyTorchLightning/pytorch-lightning/pull/1572/files#r420279383
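
In user code this path is driven by `Trainer(deterministic=True)`; the retained call is the modern replacement for the removed `torch.set_deterministic`. A minimal sketch of its effect (illustrative, outside the Trainer):

```python
import torch

# Ask PyTorch to use only deterministic kernels; operations without a deterministic
# implementation raise a RuntimeError instead of varying between runs.
torch.use_deterministic_algorithms(True)
```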

pytorch_lightning/trainer/connectors/data_connector.py

Lines changed: 6 additions & 22 deletions
@@ -220,35 +220,19 @@ def _worker_check(self, dataloader: DataLoader, name: str) -> None:
 
         # ddp_spawn + num_workers > 0 don't mix! tell the user
         if dataloader.num_workers > 0 and using_spawn:
-            # checks for the attr persistent_workers available in pytorch >= 1.7
-            if hasattr(dataloader, "persistent_workers"):
-                if not dataloader.persistent_workers:
-                    rank_zero_warn(
-                        "num_workers>0, persistent_workers=False, and strategy=ddp_spawn"
-                        " may result in data loading bottlenecks."
-                        " Consider setting persistent_workers=True"
-                        " (this is a limitation of Python .spawn() and PyTorch)"
-                    )
-            else:
+            if not dataloader.persistent_workers:
                 rank_zero_warn(
-                    "num_workers>0 and strategy=ddp_spawn do not mix well"
-                    " and may result in data loading bottlenecks."
-                    " Consider setting strategy=ddp to use num_workers>0"
+                    "num_workers>0, persistent_workers=False, and strategy=ddp_spawn"
+                    " may result in data loading bottlenecks."
+                    " Consider setting persistent_workers=True"
                     " (this is a limitation of Python .spawn() and PyTorch)"
                 )
 
         elif dataloader.num_workers == 0 and using_spawn:
-            # checks for the attr persistent_workers available in pytorch >= 1.7
-            if hasattr(dataloader, "persistent_workers"):
-                if not dataloader.persistent_workers:
-                    rank_zero_warn(
-                        "strategy=ddp_spawn and num_workers=0 may result in data loading bottlenecks."
-                        " Consider setting num_workers>0 and persistent_workers=True"
-                    )
-            else:
+            if not dataloader.persistent_workers:
                 rank_zero_warn(
                     "strategy=ddp_spawn and num_workers=0 may result in data loading bottlenecks."
-                    " Consider setting strategy=ddp and set num_workers>0"
+                    " Consider setting num_workers>0 and persistent_workers=True"
                 )
 
         elif dataloader.num_workers <= 2 < num_cpus and not using_spawn:
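
The simplified warning assumes `DataLoader.persistent_workers` always exists, which holds on PyTorch 1.8+. A hedged sketch of the configuration the warning steers users toward when combining workers with `strategy="ddp_spawn"` (dataset, batch size, and worker count are illustrative):

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.randn(64, 3), torch.randint(0, 2, (64,)))
# persistent_workers keeps worker processes alive between epochs, avoiding the
# per-epoch respawn cost that is especially noticeable under ddp_spawn.
loader = DataLoader(dataset, batch_size=8, num_workers=2, persistent_workers=True)
```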
