Commit a2d179b

[pre-commit.ci] pre-commit suggestions (#20968)
* [pre-commit.ci] pre-commit suggestions updates:
  - [github.com/PyCQA/docformatter: 06907d0267368b49b9180eed423fae5697c1e909 → v1.7.7](PyCQA/docformatter@06907d0...v1.7.7)
  - [github.com/astral-sh/ruff-pre-commit: v0.11.4 → v0.12.2](astral-sh/ruff-pre-commit@v0.11.4...v0.12.2)
  - [github.com/pre-commit/mirrors-prettier: v3.1.0 → v4.0.0-alpha.8](pre-commit/mirrors-prettier@v3.1.0...v4.0.0-alpha.8)
* Apply suggestions from code review

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Jirka Borovec <[email protected]>
Co-authored-by: Jirka B <[email protected]>
1 parent bcfa4dd commit a2d179b

File tree

8 files changed: 61 additions & 53 deletions


.pre-commit-config.yaml

Lines changed: 2 additions & 2 deletions
@@ -58,7 +58,7 @@ repos:
         #args: ["--write-changes"] # uncomment if you want to get automatic fixing

   - repo: https://github.com/PyCQA/docformatter
-    rev: 06907d0267368b49b9180eed423fae5697c1e909 # todo: fix for docformatter after last 1.7.5
+    rev: v1.7.7
     hooks:
       - id: docformatter
         additional_dependencies: [tomli]
@@ -70,7 +70,7 @@ repos:
       - id: sphinx-lint

   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.11.4
+    rev: v0.12.2
     hooks:
       # try to fix what is possible
       - id: ruff

src/lightning/pytorch/core/optimizer.py

Lines changed: 1 addition & 1 deletion
@@ -274,7 +274,7 @@ def _configure_schedulers_automatic_opt(schedulers: list, monitor: Optional[str]
             scheduler["reduce_on_plateau"] = scheduler.get(
                 "reduce_on_plateau", isinstance(scheduler["scheduler"], optim.lr_scheduler.ReduceLROnPlateau)
             )
-            if scheduler["reduce_on_plateau"] and scheduler.get("monitor", None) is None:
+            if scheduler["reduce_on_plateau"] and scheduler.get("monitor") is None:
                 raise MisconfigurationException(
                     "The lr scheduler dict must include a monitor when a `ReduceLROnPlateau` scheduler is used."
                     ' For example: {"optimizer": optimizer, "lr_scheduler":'

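Note: the only functional point in this hunk is that `dict.get(key)` already falls back to `None` when the key is missing, so dropping the explicit default cannot change behaviour. A minimal standalone check of that equivalence (toy dict, not the Lightning scheduler config):

scheduler = {"reduce_on_plateau": True}  # no "monitor" key present

# dict.get returns None when no default is supplied, so both spellings are identical
assert scheduler.get("monitor", None) is None
assert scheduler.get("monitor") is None
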
tests/tests_pytorch/callbacks/test_finetuning_callback.py

Lines changed: 1 addition & 1 deletion
@@ -109,8 +109,8 @@ def configure_optimizers(self):
     model.validation_step = None
     callback = TestBackboneFinetuningWarningCallback(unfreeze_backbone_at_epoch=3, verbose=False)

+    trainer = Trainer(limit_train_batches=1, default_root_dir=tmp_path, callbacks=[callback, chk], max_epochs=2)
     with pytest.warns(UserWarning, match="Did you init your optimizer in"):
-        trainer = Trainer(limit_train_batches=1, default_root_dir=tmp_path, callbacks=[callback, chk], max_epochs=2)
         trainer.fit(model)

     assert model.backbone.has_been_used

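The `Trainer(...)` construction moves out of the `pytest.warns` block so that only the call expected to emit the warning sits under the assertion; warnings raised during setup can otherwise satisfy or pollute the match. A generic sketch of the pattern, with hypothetical helpers rather than the Lightning API:

import warnings

import pytest


def build_trainer():
    # hypothetical setup step; should not be under the warning assertion
    return object()


def fit(trainer):
    # hypothetical call that is expected to emit the warning
    warnings.warn("Did you init your optimizer in `configure_optimizers`?", UserWarning)


def test_warns_only_during_fit():
    trainer = build_trainer()  # setup stays outside the context manager
    with pytest.warns(UserWarning, match="Did you init your optimizer in"):
        fit(trainer)  # only the warning-producing call is inside
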
tests/tests_pytorch/profilers/test_profiler.py

Lines changed: 6 additions & 5 deletions
@@ -73,9 +73,9 @@ def test_simple_profiler_durations(simple_profiler, action: str, expected: list)
     np.testing.assert_allclose(simple_profiler.recorded_durations[action], expected, rtol=0.2)


-def test_simple_profiler_overhead(simple_profiler, n_iter=5):
+def test_simple_profiler_overhead(simple_profiler):
     """Ensure that the profiler doesn't introduce too much overhead during training."""
-    for _ in range(n_iter):
+    for _ in range(5):
         with simple_profiler.profile("no-op"):
             pass

@@ -284,8 +284,9 @@ def test_advanced_profiler_durations(advanced_profiler, action: str, expected: l


 @pytest.mark.flaky(reruns=3)
-def test_advanced_profiler_overhead(advanced_profiler, n_iter=5):
+def test_advanced_profiler_overhead(advanced_profiler):
     """Ensure that the profiler doesn't introduce too much overhead during training."""
+    n_iter = 5
     for _ in range(n_iter):
         with advanced_profiler.profile("no-op"):
             pass
@@ -620,8 +621,8 @@ def test_pytorch_profiler_raises_warning_for_limited_steps(tmp_path, trainer_con
     warning_cache.clear()
     with pytest.warns(UserWarning, match="not enough steps to properly record traces"):
         getattr(trainer, trainer_fn)(model)
-        assert trainer.profiler._schedule is None
-        warning_cache.clear()
+    assert trainer.profiler._schedule is None
+    warning_cache.clear()


 def test_profile_callbacks(tmp_path):

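Both profiler tests previously declared `n_iter=5` as a default argument on the test function itself; pytest never passes extra arguments, so the value could only ever be 5, and the updated ruff presumably flags the pattern (the PT028 rule that is explicitly suppressed in the next file). A minimal sketch of the refactor with an illustrative stand-in fixture:

from contextlib import nullcontext

import pytest


@pytest.fixture
def profiler():
    # stand-in for the simple_profiler / advanced_profiler fixtures
    class _NoOpProfiler:
        def profile(self, action_name):
            return nullcontext()

    return _NoOpProfiler()


# before (flagged): def test_profiler_overhead(profiler, n_iter=5): ...
# after: the iteration count is an ordinary local constant
def test_profiler_overhead(profiler):
    n_iter = 5
    for _ in range(n_iter):
        with profiler.profile("no-op"):
            pass
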
tests/tests_pytorch/trainer/logging_/test_eval_loop_logging.py

Lines changed: 3 additions & 3 deletions
@@ -234,9 +234,9 @@ def on_test_epoch_end(self):


 @pytest.mark.parametrize("suffix", [False, True])
-def test_multi_dataloaders_add_suffix_properly(tmp_path, suffix):
+def test_multi_dataloaders_add_suffix_properly(suffix, tmp_path):
     class TestModel(BoringModel):
-        def test_step(self, batch, batch_idx, dataloader_idx=0):
+        def test_step(self, batch, batch_idx, dataloader_idx=0):  # noqa: PT028
             out = super().test_step(batch, batch_idx)
             self.log("test_loss", out["y"], on_step=True, on_epoch=True)
             return out
@@ -441,7 +441,7 @@ def on_test_epoch_end(self, _, pl_module):
     class TestModel(BoringModel):
         seen_losses = {i: [] for i in range(num_dataloaders)}

-        def test_step(self, batch, batch_idx, dataloader_idx=0):
+        def test_step(self, batch, batch_idx, dataloader_idx=0):  # noqa: PT028
             loss = super().test_step(batch, batch_idx)["y"]
             self.log("test_loss", loss)
             self.seen_losses[dataloader_idx].append(loss)

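Here the `dataloader_idx=0` default cannot simply be dropped the way `n_iter` was above: it mirrors the `test_step` hook signature, which Lightning calls with or without a dataloader index, so the lint is silenced with `# noqa: PT028` instead. A simplified, hypothetical dispatcher illustrating why the default matters (not Lightning's actual call path; runnable as a plain script):

def run_hook(hook, batch, batch_idx, dataloader_idx=None):
    # toy dispatcher: one dataloader calls the hook without an index,
    # several dataloaders pass the index explicitly
    if dataloader_idx is None:
        return hook(batch, batch_idx)
    return hook(batch, batch_idx, dataloader_idx)


def test_step(batch, batch_idx, dataloader_idx=0):  # noqa: PT028 - the default is part of the hook contract
    return {"batch_idx": batch_idx, "dataloader_idx": dataloader_idx}


assert run_hook(test_step, batch=[0], batch_idx=3) == {"batch_idx": 3, "dataloader_idx": 0}
assert run_hook(test_step, batch=[0], batch_idx=3, dataloader_idx=1) == {"batch_idx": 3, "dataloader_idx": 1}
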
tests/tests_pytorch/trainer/test_config_validator.py

Lines changed: 10 additions & 12 deletions
@@ -16,7 +16,6 @@
 import pytest
 import torch

-from lightning.fabric.utilities.warnings import PossibleUserWarning
 from lightning.pytorch import LightningDataModule, LightningModule, Trainer
 from lightning.pytorch.demos.boring_classes import BoringModel, RandomDataset
 from lightning.pytorch.trainer.configuration_validator import (
@@ -46,20 +45,19 @@ def test_wrong_configure_optimizers(tmp_path):
     trainer.fit(model)


-def test_fit_val_loop_config(tmp_path):
+@pytest.mark.parametrize("model_attrib", ["validation_step", "val_dataloader"])
+def test_fit_val_loop_config(model_attrib, tmp_path):
     """When either val loop or val data are missing raise warning."""
     trainer = Trainer(default_root_dir=tmp_path, max_epochs=1)

-    # no val data has val loop
-    with pytest.warns(UserWarning, match=r"You passed in a `val_dataloader` but have no `validation_step`"):
-        model = BoringModel()
-        model.validation_step = None
-        trainer.fit(model)
-
-    # has val loop but no val data
-    with pytest.warns(PossibleUserWarning, match=r"You defined a `validation_step` but have no `val_dataloader`"):
-        model = BoringModel()
-        model.val_dataloader = None
+    model = BoringModel()
+    setattr(model, model_attrib, None)
+    match_msg = (
+        r"You passed in a `val_dataloader` but have no `validation_step`"
+        if model_attrib == "validation_step"
+        else "You defined a `validation_step` but have no `val_dataloader`"
+    )
+    with pytest.warns(UserWarning, match=match_msg):
         trainer.fit(model)


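The two sequential warning checks collapse into one parametrized test: the parameter names the attribute to blank out, and a conditional expression picks the expected message. The second case also switches from `PossibleUserWarning` to plain `UserWarning`, which keeps matching as long as `PossibleUserWarning` subclasses `UserWarning` (it does in Lightning). A self-contained sketch of the same shape, with a `fit` stub standing in for `trainer.fit`:

import warnings

import pytest


def fit(has_validation_step: bool, has_val_dataloader: bool) -> None:
    # stub for trainer.fit: warn about whichever half of the val loop is missing
    if has_val_dataloader and not has_validation_step:
        warnings.warn("You passed in a `val_dataloader` but have no `validation_step`", UserWarning)
    if has_validation_step and not has_val_dataloader:
        warnings.warn("You defined a `validation_step` but have no `val_dataloader`", UserWarning)


@pytest.mark.parametrize("model_attrib", ["validation_step", "val_dataloader"])
def test_fit_val_loop_config(model_attrib):
    match_msg = (
        "You passed in a `val_dataloader` but have no `validation_step`"
        if model_attrib == "validation_step"
        else "You defined a `validation_step` but have no `val_dataloader`"
    )
    with pytest.warns(UserWarning, match=match_msg):
        fit(
            has_validation_step=model_attrib != "validation_step",
            has_val_dataloader=model_attrib != "val_dataloader",
        )
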
tests/tests_pytorch/trainer/test_dataloaders.py

Lines changed: 35 additions & 28 deletions
@@ -545,13 +545,14 @@ def test_warning_with_few_workers(_, tmp_path, ckpt_path, stage):

     trainer = Trainer(default_root_dir=tmp_path, max_epochs=1, limit_val_batches=0.1, limit_train_batches=0.2)

-    with pytest.warns(UserWarning, match=f"The '{stage}_dataloader' does not have many workers"):
-        if stage == "test":
-            if ckpt_path in ("specific", "best"):
-                trainer.fit(model, train_dataloaders=train_dl, val_dataloaders=val_dl)
-            ckpt_path = trainer.checkpoint_callback.best_model_path if ckpt_path == "specific" else ckpt_path
+    if stage == "test":
+        if ckpt_path in ("specific", "best"):
+            trainer.fit(model, train_dataloaders=train_dl, val_dataloaders=val_dl)
+        ckpt_path = trainer.checkpoint_callback.best_model_path if ckpt_path == "specific" else ckpt_path
+        with pytest.warns(UserWarning, match=f"The '{stage}_dataloader' does not have many workers"):
             trainer.test(model, dataloaders=train_dl, ckpt_path=ckpt_path)
-        else:
+    else:
+        with pytest.warns(UserWarning, match=f"The '{stage}_dataloader' does not have many workers"):
             trainer.fit(model, train_dataloaders=train_dl, val_dataloaders=val_dl)


@@ -579,16 +580,15 @@ def training_step(self, batch, batch_idx):

     trainer = Trainer(default_root_dir=tmp_path, max_epochs=1, limit_val_batches=0.1, limit_train_batches=0.2)

-    with pytest.warns(
-        UserWarning,
-        match=f"The '{stage}_dataloader' does not have many workers",
-    ):
-        if stage == "test":
-            if ckpt_path in ("specific", "best"):
-                trainer.fit(model, train_dataloaders=train_multi_dl, val_dataloaders=val_multi_dl)
-            ckpt_path = trainer.checkpoint_callback.best_model_path if ckpt_path == "specific" else ckpt_path
+    if stage == "test":
+        if ckpt_path in ("specific", "best"):
+            trainer.fit(model, train_dataloaders=train_multi_dl, val_dataloaders=val_multi_dl)
+        ckpt_path = trainer.checkpoint_callback.best_model_path if ckpt_path == "specific" else ckpt_path
+
+        with pytest.warns(UserWarning, match=f"The '{stage}_dataloader' does not have many workers"):
             trainer.test(model, dataloaders=test_multi_dl, ckpt_path=ckpt_path)
-        else:
+    else:
+        with pytest.warns(UserWarning, match=f"The '{stage}_dataloader' does not have many workers"):
             trainer.fit(model, train_dataloaders=train_multi_dl, val_dataloaders=val_multi_dl)


@@ -669,28 +669,35 @@ def test_auto_add_worker_init_fn_distributed(tmp_path, monkeypatch):
     trainer.fit(model, train_dataloaders=dataloader)


-def test_warning_with_small_dataloader_and_logging_interval(tmp_path):
+@pytest.mark.parametrize("log_interval", [2, 11])
+def test_warning_with_small_dataloader_and_logging_interval(log_interval, tmp_path):
     """Test that a warning message is shown if the dataloader length is too short for the chosen logging interval."""
     model = BoringModel()
     dataloader = DataLoader(RandomDataset(32, length=10))
     model.train_dataloader = lambda: dataloader

-    with pytest.warns(UserWarning, match=r"The number of training batches \(10\) is smaller than the logging interval"):
-        trainer = Trainer(default_root_dir=tmp_path, max_epochs=1, log_every_n_steps=11, logger=CSVLogger(tmp_path))
+    trainer = Trainer(
+        default_root_dir=tmp_path,
+        max_epochs=1,
+        log_every_n_steps=log_interval,
+        limit_train_batches=1 if log_interval < 10 else None,
+        logger=CSVLogger(tmp_path),
+    )
+    with pytest.warns(
+        UserWarning,
+        match=rf"The number of training batches \({log_interval - 1}\) is smaller than the logging interval",
+    ):
         trainer.fit(model)

-    with pytest.warns(UserWarning, match=r"The number of training batches \(1\) is smaller than the logging interval"):
-        trainer = Trainer(
-            default_root_dir=tmp_path,
-            max_epochs=1,
-            log_every_n_steps=2,
-            limit_train_batches=1,
-            logger=CSVLogger(tmp_path),
-        )
-        trainer.fit(model)

+def test_warning_with_small_dataloader_and_fast_dev_run(tmp_path):
+    """Test that a warning message is shown if the dataloader length is too short for the chosen logging interval."""
+    model = BoringModel()
+    dataloader = DataLoader(RandomDataset(32, length=10))
+    model.train_dataloader = lambda: dataloader
+
+    trainer = Trainer(default_root_dir=tmp_path, fast_dev_run=True, log_every_n_steps=2)
     with no_warning_call(UserWarning, match="The number of training batches"):
-        trainer = Trainer(default_root_dir=tmp_path, fast_dev_run=True, log_every_n_steps=2)
         trainer.fit(model)


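The parametrized rewrite of the logging-interval test relies on a small invariant: the dataset has 10 samples (batch size 1), so `log_every_n_steps=11` leaves 10 training batches, while `log_every_n_steps=2` with `limit_train_batches=1` leaves a single batch; in both cases the batch count equals `log_interval - 1`, which is what the warning regex interpolates. A standalone check of that arithmetic (dataset length taken from the diff):

DATASET_LENGTH = 10  # RandomDataset(32, length=10) consumed with batch size 1

for log_interval in (2, 11):
    limit_train_batches = 1 if log_interval < 10 else None
    n_batches = limit_train_batches if limit_train_batches is not None else DATASET_LENGTH
    # the warning quotes this number, hence the match pattern uses {log_interval - 1}
    assert n_batches == log_interval - 1
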
tests/tests_pytorch/utilities/test_model_summary.py

Lines changed: 3 additions & 1 deletion
@@ -18,6 +18,7 @@
 import pytest
 import torch
 import torch.nn as nn
+from lightning_utilities.test.warning import no_warning_call

 from lightning.pytorch import LightningModule, Trainer
 from lightning.pytorch.demos.boring_classes import BoringModel
@@ -348,7 +349,7 @@ def test_model_size_warning_on_unsupported_precision(tmp_path):

     with pytest.warns(UserWarning, match="Precision .* is not supported by the model summary.*"):
         summary = summarize(model)
-        assert model.pre_calculated_model_size == summary.model_size
+    assert model.pre_calculated_model_size == summary.model_size


 def test_lazy_model_summary():
@@ -358,6 +359,7 @@

     with pytest.warns(UserWarning, match="The total number of parameters detected may be inaccurate."):
         assert summary.total_parameters == 0
+    with no_warning_call():
         assert summary.trainable_parameters == 0


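`no_warning_call` from `lightning_utilities.test.warning` is the inverse of `pytest.warns`: the block fails if a matching warning is raised inside it. The bare form added here guards the `trainable_parameters` access, while the filtered form appears in test_dataloaders.py above. A small usage sketch:

from lightning_utilities.test.warning import no_warning_call


def test_no_warning_call_usage():
    # bare form: any warning raised inside the block fails the test
    with no_warning_call():
        assert 1 + 1 == 2

    # filtered form, as in test_dataloaders.py: only a UserWarning whose message
    # matches the pattern is treated as a failure
    with no_warning_call(UserWarning, match="The number of training batches"):
        pass  # nothing warns here, so the block passes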