
Commit aff6b1b

Merge branch 'master' into HEAD

2 parents db52733 + f95ba20

File tree: 21 files changed, +463 -18 lines


.github/workflows/ci_test-conda.yml

Lines changed: 2 additions & 1 deletion
@@ -31,6 +31,7 @@ jobs:
         python ./requirements/adjust_versions.py requirements/extra.txt
         python ./requirements/adjust_versions.py requirements/examples.txt
         pip install --requirement requirements/devel.txt --find-links https://download.pytorch.org/whl/nightly/torch_nightly.html
+        pip install pytest-random-order
         pip list

     - name: Pull checkpoints from S3
@@ -44,7 +45,7 @@ jobs:
     - name: Tests
       run: |
         # NOTE: run coverage on tests does not propagate failure status for Win, https://github.com/nedbat/coveragepy/issues/1003
-        coverage run --source pytorch_lightning -m pytest pytorch_lightning tests -v --durations=50 --junitxml=junit/test-results-${{ runner.os }}-torch${{ matrix.pytorch-version }}.xml
+        coverage run --source pytorch_lightning -m pytest --random-order-seed=1 pytorch_lightning tests -v --durations=50 --junitxml=junit/test-results-${{ runner.os }}-torch${{ matrix.pytorch-version }}.xml
       shell: bash -l {0}

     - name: Upload pytest results
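The added pytest-random-order plugin shuffles test execution order, and pinning --random-order-seed=1 keeps the shuffle reproducible across CI runs. A minimal sketch of invoking the same options programmatically through pytest.main (the "tests" path is a placeholder):

    import pytest

    # Shuffle test order but pin the seed so failures reproduce; requires
    # the pytest-random-order plugin to be installed in the environment.
    exit_code = pytest.main(["tests", "-v", "--random-order-seed=1"])
    raise SystemExit(exit_code)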

CHANGELOG.md

Lines changed: 3 additions & 0 deletions
@@ -195,6 +195,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 - Added `strategy` argument to Trainer ([#8597](https://github.com/PyTorchLightning/pytorch-lightning/pull/8597))


+- Added `init_meta_context`, `materialize_module` utilities ([#9920](https://github.com/PyTorchLightning/pytorch-lightning/pull/9920))
+
+
 - Added `TPUPrecisionPlugin` ([#10020](https://github.com/PyTorchLightning/pytorch-lightning/pull/#10020))
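The meta-device utilities referenced above let you instantiate a model without allocating real parameter storage and materialize it later. A minimal sketch, assuming both helpers are importable from pytorch_lightning.utilities.meta as introduced in #9920:

    import torch.nn as nn
    from pytorch_lightning.utilities.meta import init_meta_context, materialize_module

    # Modules created inside the context hold "meta" tensors with no storage.
    with init_meta_context():
        model = nn.Sequential(nn.Linear(4096, 4096), nn.ReLU())

    # Swap the meta parameters for real, initialized ones when needed.
    materialize_module(model)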

docs/source/advanced/mixed_precision.rst

Lines changed: 1 addition & 1 deletion
@@ -50,7 +50,7 @@ BFloat16 Mixed precision is similar to FP16 mixed precision, however we maintain
 Since BFloat16 is more stable than FP16 during training, we do not need to worry about any gradient scaling or nan gradient values that comes with using FP16 mixed precision.

 .. testcode::
-    :skipif: not _TORCH_GREATER_EQUAL_DEV_1_10
+    :skipif: not _TORCH_GREATER_EQUAL_DEV_1_10 or not torch.cuda.is_available()

     Trainer(gpus=1, precision="bf16")
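The tightened :skipif: mirrors the guard a user would apply at runtime: only request a GPU bf16 run when CUDA is actually present. A minimal sketch (assumes a torch 1.10 dev build, per the _TORCH_GREATER_EQUAL_DEV_1_10 flag above):

    import torch
    from pytorch_lightning import Trainer

    # Request bf16 mixed precision on one GPU only if a CUDA device exists.
    if torch.cuda.is_available():
        trainer = Trainer(gpus=1, precision="bf16")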

docs/source/common/trainer.rst

Lines changed: 3 additions & 1 deletion
@@ -516,7 +516,9 @@ Example::
 checkpoint_callback
 ^^^^^^^^^^^^^^^^^^^

-Deprecated: This has been deprecated in v1.5 and will be removed in v1.7. Please use ``enable_checkpointing`` instead.
+.. warning:: `checkpoint_callback` has been deprecated in v1.5 and will be removed in v1.7.
+    To disable checkpointing, pass ``enable_checkpointing = False`` to the Trainer instead.
+

 default_root_dir
 ^^^^^^^^^^^^^^^^
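The migration is a one-line change on the user side; a minimal sketch of the replacement flag:

    from pytorch_lightning import Trainer

    # Before (deprecated in v1.5, removed in v1.7):
    # trainer = Trainer(checkpoint_callback=False)

    # After:
    trainer = Trainer(enable_checkpointing=False)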

docs/source/extensions/callbacks.rst

Lines changed: 4 additions & 4 deletions
@@ -72,10 +72,10 @@ Examples
 --------
 You can do pretty much anything with callbacks.

-- `Add a MLP to fine-tune self-supervised networks <https://lightning-bolts.readthedocs.io/en/latest/self_supervised_callbacks.html#sslonlineevaluator>`_.
-- `Find how to modify an image input to trick the classification result <https://lightning-bolts.readthedocs.io/en/latest/vision_callbacks.html#confused-logit>`_.
-- `Interpolate the latent space of any variational model <https://lightning-bolts.readthedocs.io/en/latest/variational_callbacks.html#latent-dim-interpolator>`_.
-- `Log images to Tensorboard for any model <https://lightning-bolts.readthedocs.io/en/latest/vision_callbacks.html#tensorboard-image-generator>`_.
+- `Add a MLP to fine-tune self-supervised networks <https://lightning-bolts.readthedocs.io/en/latest/deprecated/callbacks/self_supervised.html#sslonlineevaluator>`_.
+- `Find how to modify an image input to trick the classification result <https://lightning-bolts.readthedocs.io/en/latest/deprecated/callbacks/vision.html#confused-logit>`_.
+- `Interpolate the latent space of any variational model <https://lightning-bolts.readthedocs.io/en/latest/deprecated/callbacks/variational.html#latent-dim-interpolator>`_.
+- `Log images to Tensorboard for any model <https://lightning-bolts.readthedocs.io/en/latest/deprecated/callbacks/vision.html#tensorboard-image-generator>`_.


 --------------

pytorch_lightning/__about__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
import time
22

33
_this_year = time.strftime("%Y")
4-
__version__ = "1.5.0rc0"
4+
__version__ = "1.5.0rc1"
55
__author__ = "William Falcon et al."
66
__author_email__ = "[email protected]"
77
__license__ = "Apache-2.0"

pytorch_lightning/core/lightning.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -38,6 +38,7 @@
3838
from pytorch_lightning.core.saving import ModelIO
3939
from pytorch_lightning.trainer.connectors.logger_connector.fx_validator import _FxValidator
4040
from pytorch_lightning.utilities import (
41+
_IS_WINDOWS,
4142
_TORCH_GREATER_EQUAL_DEV_1_10,
4243
GradClipAlgorithmType,
4344
rank_zero_deprecation,
@@ -2041,7 +2042,7 @@ def _register_sharded_tensor_state_dict_hooks_if_available(self) -> None:
20412042
20422043
These hooks ensure that ShardedTensors are included when saving, and are loaded the LightningModule correctly.
20432044
"""
2044-
if not _TORCH_GREATER_EQUAL_DEV_1_10:
2045+
if not _TORCH_GREATER_EQUAL_DEV_1_10 or _IS_WINDOWS:
20452046
return
20462047

20472048
from torch.distributed._sharded_tensor import pre_load_state_dict_hook, state_dict_hook
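Since torch.distributed._sharded_tensor is unavailable on Windows, the hook registration now bails out early there. A minimal sketch of how such a platform flag is typically derived (PL ships _IS_WINDOWS in its utilities; the exact definition below is an assumption):

    import platform

    # True on any Windows host; used to skip torch.distributed features
    # that are not supported on that platform.
    _IS_WINDOWS = platform.system() == "Windows"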

pytorch_lightning/plugins/training_type/ddp.py

Lines changed: 6 additions & 2 deletions
@@ -42,6 +42,7 @@
 from pytorch_lightning.utilities import (
     _FAIRSCALE_AVAILABLE,
     _HYDRA_AVAILABLE,
+    _IS_WINDOWS,
     _TORCH_GREATER_EQUAL_1_7,
     _TORCH_GREATER_EQUAL_1_8,
     _TORCH_GREATER_EQUAL_1_9,
@@ -57,7 +58,9 @@
 from pytorch_lightning.utilities.types import STEP_OUTPUT

 if _TORCH_GREATER_EQUAL_1_10:
-    from torch.distributed.optim import DistributedOptimizer, PostLocalSGDOptimizer, ZeroRedundancyOptimizer
+    if not _IS_WINDOWS:
+        from torch.distributed.optim import DistributedOptimizer
+    from torch.distributed.optim import PostLocalSGDOptimizer, ZeroRedundancyOptimizer

 if _FAIRSCALE_AVAILABLE:
     from fairscale.optim import OSS
@@ -333,8 +336,9 @@ def _reinit_optimizers_with_post_localSGD(self, warmup_steps: int):
             if isinstance(optimizer, LightningOptimizer):
                 optimizer = optimizer._optimizer

+            is_distributed_optimizer = isinstance(optimizer, DistributedOptimizer) if not _IS_WINDOWS else False
             if (
-                isinstance(optimizer, DistributedOptimizer)
+                is_distributed_optimizer
                 or isinstance(optimizer, ZeroRedundancyOptimizer)
                 or (_FAIRSCALE_AVAILABLE and isinstance(optimizer, OSS))
             ):
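Because the DistributedOptimizer import is now skipped on Windows, the name may be undefined at the isinstance check, so the check has to short-circuit on the platform flag first. A self-contained sketch of the pattern, with a hypothetical helper _is_distributed_optimizer wrapping the expression the diff inlines:

    import platform

    _IS_WINDOWS = platform.system() == "Windows"  # assumed definition, see above

    if not _IS_WINDOWS:
        # Only importable where torch.distributed supports the RPC framework.
        from torch.distributed.optim import DistributedOptimizer

    def _is_distributed_optimizer(optimizer) -> bool:
        # Never touch the possibly-undefined name on Windows.
        return isinstance(optimizer, DistributedOptimizer) if not _IS_WINDOWS else False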

pytorch_lightning/plugins/training_type/deepspeed.py

Lines changed: 1 addition & 1 deletion
@@ -426,7 +426,7 @@ def _setup_model_and_optimizer(
     def init_deepspeed(self):
         # check that `configure_gradient_clipping` hook isn't overriden since deepspeed handles
         # gradient clipping internally
-        if is_overridden("configure_gradient_clipping", self.lightning_module):
+        if is_overridden("configure_gradient_clipping", self.lightning_module, pl.LightningModule):
             rank_zero_warn(
                 "Since deepspeed handles gradient clipping internally, this hook will"
                 " be ignored. Consider setting `gradient_clip_val` and `gradient_clip_algorithm`"

pytorch_lightning/plugins/training_type/sharded.py

Lines changed: 0 additions & 1 deletion
@@ -73,7 +73,6 @@ def _setup_models_and_optimizers(

         optimizers = self._wrap_optimizers(optimizers)
         model = ShardedDataParallel(models[0], sharded_optimizer=optimizers, **self._ddp_kwargs)
-        setattr(model, "require_backward_grad_sync", False)  # TODO: needed?
         return [model], optimizers

     def _reinit_optimizers_with_oss(self, optimizers: List[Union[Optimizer, LightningOptimizer]]) -> List["OSS"]:
