Commit c6f24d5

Merge branch 'master' into rich_progress_bar_vnum
2 parents 8edd91f + 791753b commit c6f24d5

File tree: 8 files changed (+96, −22 lines)

.github/CONTRIBUTING.md
Lines changed: 18 additions & 4 deletions

@@ -113,14 +113,28 @@ ______________________________________________________________________
 
 To set up a local development environment, we recommend using `uv`, which can be installed following their [instructions](https://docs.astral.sh/uv/getting-started/installation/).
 
-Once `uv` has been installed, begin by cloning the repository:
+Once `uv` has been installed, begin by cloning the forked repository:
 
 ```bash
-git clone https://github.com/Lightning-AI/lightning.git
-cd lightning
+git clone https://github.com/{YOUR_GITHUB_USERNAME}/pytorch-lightning.git
+cd pytorch-lightning
 ```
 
-Once in root level of the repository, create a new virtual environment and install the project dependencies.
+> If you're using [Lightning Studio](https://lightning.ai) or already have your `uv venv` activated, you can quickly set up the project by running:
+
+```bash
+make setup
+```
+
+This will:
+
+- Install all required dependencies.
+- Perform an editable install of the `pytorch-lightning` project.
+- Install and configure `pre-commit`.
+
+#### Manual Setup (Optional)
+
+If you prefer more fine-grained control over the dependencies, you can set up the environment manually:
 
 ```bash
 uv venv

Makefile
Lines changed: 18 additions & 1 deletion

@@ -1,4 +1,4 @@
-.PHONY: test clean docs
+.PHONY: test clean docs setup
 
 # to imitate SLURM set only single node
 export SLURM_LOCALID=0
@@ -7,6 +7,23 @@ export SPHINX_MOCK_REQUIREMENTS=1
 # install only Lightning Trainer packages
 export PACKAGE_NAME=pytorch
 
+setup:
+	uv pip install -r requirements.txt \
+		-r requirements/pytorch/base.txt \
+		-r requirements/pytorch/test.txt \
+		-r requirements/pytorch/extra.txt \
+		-r requirements/pytorch/strategies.txt \
+		-r requirements/fabric/base.txt \
+		-r requirements/fabric/test.txt \
+		-r requirements/fabric/strategies.txt \
+		-r requirements/typing.txt \
+		-e ".[all]" \
+		pre-commit
+	pre-commit install
+	@echo "-----------------------------"
+	@echo "✅ Environment setup complete. Ready to Contribute ⚡️!"
+
 clean:
 	# clean all temp runs
 	rm -rf $(shell find . -name "mlruns")

src/lightning/fabric/CHANGELOG.md
Lines changed: 4 additions & 0 deletions

@@ -16,6 +16,10 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 -
 
+### Changed
+
+- Raise ValueError when seed is `out-of-bounds` or `cannot be cast to int` ([#21029](https://github.com/Lightning-AI/pytorch-lightning/pull/21029))
+
 
 ---

src/lightning/fabric/utilities/seed.py
Lines changed: 4 additions & 5 deletions

@@ -27,7 +27,8 @@ def seed_everything(seed: Optional[int] = None, workers: bool = False, verbose:
     Args:
         seed: the integer value seed for global random state in Lightning.
             If ``None``, it will read the seed from ``PL_GLOBAL_SEED`` env variable. If ``None`` and the
-            ``PL_GLOBAL_SEED`` env variable is not set, then the seed defaults to 0.
+            ``PL_GLOBAL_SEED`` env variable is not set, then the seed defaults to 0. If seed is
+            not in bounds or cannot be cast to int, a ValueError is raised.
         workers: if set to ``True``, will properly configure all dataloaders passed to the
             Trainer with a ``worker_init_fn``. If the user already provides such a function
             for their dataloaders, setting this argument will have no influence. See also:
@@ -44,14 +45,12 @@ def seed_everything(seed: Optional[int] = None, workers: bool = False, verbose:
         try:
             seed = int(env_seed)
         except ValueError:
-            seed = 0
-            rank_zero_warn(f"Invalid seed found: {repr(env_seed)}, seed set to {seed}")
+            raise ValueError(f"Invalid seed specified via PL_GLOBAL_SEED: {repr(env_seed)}")
     elif not isinstance(seed, int):
         seed = int(seed)
 
     if not (min_seed_value <= seed <= max_seed_value):
-        rank_zero_warn(f"{seed} is not in bounds, numpy accepts from {min_seed_value} to {max_seed_value}")
-        seed = 0
+        raise ValueError(f"{seed} is not in bounds, numpy accepts from {min_seed_value} to {max_seed_value}")
 
     if verbose:
         log.info(rank_prefixed_message(f"Seed set to {seed}", _get_rank()))
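
The net effect is that `seed_everything` now fails fast instead of warning and silently falling back to seed 0. A minimal sketch of the new behavior, assuming a build that includes this commit:

```python
# Sketch under the assumption that this commit is installed; the import path
# matches the module patched above.
from lightning.fabric.utilities.seed import seed_everything

assert seed_everything(45) == 45  # a valid seed is applied and returned

try:
    seed_everything(10**10)  # outside the numpy-accepted seed range
except ValueError as err:
    print(err)  # "... is not in bounds, numpy accepts from ... to ..."
```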

src/lightning/pytorch/CHANGELOG.md
Lines changed: 2 additions & 1 deletion

@@ -10,7 +10,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 ### Added
 
--
+- Added support for general mappings being returned from `training_step` when using manual optimization ([#21011](https://github.com/Lightning-AI/pytorch-lightning/pull/21011))
+
 
 
 ### Changed

src/lightning/pytorch/loops/optimization/manual.py
Lines changed: 2 additions & 1 deletion

@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from collections import OrderedDict
+from collections.abc import Mapping
 from contextlib import suppress
 from dataclasses import dataclass, field
 from typing import Any
@@ -45,7 +46,7 @@ class ManualResult(OutputResult):
     @classmethod
     def from_training_step_output(cls, training_step_output: STEP_OUTPUT) -> "ManualResult":
         extra = {}
-        if isinstance(training_step_output, dict):
+        if isinstance(training_step_output, Mapping):
             extra = training_step_output.copy()
         elif isinstance(training_step_output, Tensor):
             extra = {"loss": training_step_output}
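
With the check widened to `Mapping`, `training_step` under manual optimization can return any `collections.abc.Mapping`, not only a plain `dict`. Below is a hedged sketch of the widened code path; `ManualResult` is internal API used here purely for illustration, and the mapping must still expose a `copy` method because `from_training_step_output` calls it:

```python
# Illustration only: exercises the private ManualResult class changed above.
# Assumes a lightning build that includes this commit.
from collections.abc import Mapping

import torch
from lightning.pytorch.loops.optimization.manual import ManualResult


class FrozenMapping(Mapping):
    """A read-only Mapping; `copy` is still required by the training loop."""

    def __init__(self, **kwargs):
        self._store = dict(kwargs)

    def __getitem__(self, key):
        return self._store[key]

    def __iter__(self):
        return iter(self._store)

    def __len__(self):
        return len(self._store)

    def copy(self):
        return dict(self._store)


out = FrozenMapping(loss1=torch.tensor(0.1), loss2=torch.tensor(0.2))
result = ManualResult.from_training_step_output(out)  # previously rejected for non-dict mappings
```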

tests/tests_fabric/utilities/test_seed.py
Lines changed: 18 additions & 8 deletions

@@ -47,19 +47,29 @@ def test_correct_seed_with_environment_variable():
 
 
 @mock.patch.dict(os.environ, {"PL_GLOBAL_SEED": "invalid"}, clear=True)
 def test_invalid_seed():
-    """Ensure that we still fix the seed even if an invalid seed is given."""
-    with pytest.warns(UserWarning, match="Invalid seed found"):
-        seed = seed_everything()
-    assert seed == 0
+    """Ensure that a ValueError is raised if an invalid seed is given."""
+    with pytest.raises(ValueError, match="Invalid seed specified"):
+        seed_everything()
 
 
 @mock.patch.dict(os.environ, {}, clear=True)
 @pytest.mark.parametrize("seed", [10e9, -10e9])
 def test_out_of_bounds_seed(seed):
-    """Ensure that we still fix the seed even if an out-of-bounds seed is given."""
-    with pytest.warns(UserWarning, match="is not in bounds"):
-        actual = seed_everything(seed)
-    assert actual == 0
+    """Ensure that a ValueError is raised if an out-of-bounds seed is given."""
+    with pytest.raises(ValueError, match="is not in bounds"):
+        seed_everything(seed)
+
+
+def test_seed_everything_accepts_valid_seed_argument():
+    """Ensure that seed_everything returns the provided valid seed."""
+    seed_value = 45
+    assert seed_everything(seed_value) == seed_value
+
+
+@mock.patch.dict(os.environ, {"PL_GLOBAL_SEED": "17"}, clear=True)
+def test_seed_everything_accepts_valid_seed_from_env():
+    """Ensure that seed_everything uses the valid seed from the PL_GLOBAL_SEED environment variable."""
+    assert seed_everything() == 17
 
 
 def test_reset_seed_no_op():

tests/tests_pytorch/trainer/optimization/test_manual_optimization.py
Lines changed: 30 additions & 2 deletions

@@ -304,8 +304,36 @@ def on_train_epoch_end(self, *_, **__):
     trainer.fit(model)
 
 
+class CustomMapping(collections.abc.Mapping):
+    """A custom implementation of Mapping for testing purposes."""
+
+    def __init__(self, *args, **kwargs):
+        self._store = dict(*args, **kwargs)
+
+    def __getitem__(self, key):
+        return self._store[key]
+
+    def __iter__(self):
+        return iter(self._store)
+
+    def __len__(self):
+        return len(self._store)
+
+    def __repr__(self):
+        return f"{self.__class__.__name__}({self._store})"
+
+    def __copy__(self):
+        cls = self.__class__
+        new_obj = cls(self._store.copy())
+        return new_obj
+
+    def copy(self):
+        return self.__copy__()
+
+
 @RunIf(min_cuda_gpus=1)
-def test_multiple_optimizers_step(tmp_path):
+@pytest.mark.parametrize("dicttype", [dict, CustomMapping])
+def test_multiple_optimizers_step(tmp_path, dicttype):
     """Tests that `step` works with several optimizers."""
 
     class TestModel(ManualOptModel):
@@ -335,7 +363,7 @@ def training_step(self, batch, batch_idx):
             opt_b.step()
             opt_b.zero_grad()
 
-            return {"loss1": loss_1.detach(), "loss2": loss_2.detach()}
+            return dicttype(loss1=loss_1.detach(), loss2=loss_2.detach())
 
         # sister test: tests/plugins/test_amp_plugins.py::test_amp_gradient_unscale
         def on_after_backward(self) -> None:
