From 513121d89614d825da51a78ab23cc7e08811462a Mon Sep 17 00:00:00 2001
From: Luca Antiga
Date: Wed, 11 Dec 2024 09:45:42 +0000
Subject: [PATCH 1/2] Force hook standalone tests to single device

---
 tests/tests_pytorch/models/test_hooks.py | 25 +++++++++++++++++++-----
 1 file changed, 20 insertions(+), 5 deletions(-)

diff --git a/tests/tests_pytorch/models/test_hooks.py b/tests/tests_pytorch/models/test_hooks.py
index 685bd6c0bdaef..5fde5b1269467 100644
--- a/tests/tests_pytorch/models/test_hooks.py
+++ b/tests/tests_pytorch/models/test_hooks.py
@@ -61,7 +61,12 @@ def on_before_zero_grad(self, optimizer):
 
     model = CurrentTestModel()
 
-    trainer = Trainer(default_root_dir=tmp_path, max_steps=max_steps, max_epochs=2)
+    trainer = Trainer(
+        devices=1,
+        default_root_dir=tmp_path,
+        max_steps=max_steps,
+        max_epochs=2
+    )
     assert model.on_before_zero_grad_called == 0
     trainer.fit(model)
     assert max_steps == model.on_before_zero_grad_called
@@ -406,7 +411,7 @@ def prepare_data(self): ...
 @pytest.mark.parametrize(
     "kwargs",
     [
-        {},
+        {"devices": 1},
         # these precision plugins modify the optimization flow, so testing them explicitly
         pytest.param({"accelerator": "gpu", "devices": 1, "precision": "16-mixed"}, marks=RunIf(min_cuda_gpus=1)),
         pytest.param(
@@ -528,6 +533,7 @@ def test_trainer_model_hook_system_fit_no_val_and_resume_max_epochs(tmp_path):
     # initial training to get a checkpoint
     model = BoringModel()
     trainer = Trainer(
+        devices=1,
         default_root_dir=tmp_path,
         max_epochs=1,
         limit_train_batches=2,
@@ -543,6 +549,7 @@ def test_trainer_model_hook_system_fit_no_val_and_resume_max_epochs(tmp_path):
     callback = HookedCallback(called)
     # already performed 1 step, resume and do 2 more
     trainer = Trainer(
+        devices=1,
         default_root_dir=tmp_path,
         max_epochs=2,
         limit_train_batches=2,
@@ -605,6 +612,7 @@ def test_trainer_model_hook_system_fit_no_val_and_resume_max_steps(tmp_path):
     # initial training to get a checkpoint
     model = BoringModel()
     trainer = Trainer(
+        devices=1,
         default_root_dir=tmp_path,
         max_steps=1,
         limit_val_batches=0,
@@ -624,6 +632,7 @@ def test_trainer_model_hook_system_fit_no_val_and_resume_max_steps(tmp_path):
     train_batches = 2
     steps_after_reload = 1 + train_batches
     trainer = Trainer(
+        devices=1,
         default_root_dir=tmp_path,
         max_steps=steps_after_reload,
         limit_val_batches=0,
@@ -690,6 +699,7 @@ def test_trainer_model_hook_system_eval(tmp_path, override_on_x_model_train, bat
     assert is_overridden(f"on_{noun}_model_train", model) == override_on_x_model_train
     callback = HookedCallback(called)
     trainer = Trainer(
+        devices=1,
         default_root_dir=tmp_path,
         max_epochs=1,
         limit_val_batches=batches,
@@ -731,7 +741,10 @@ def test_trainer_model_hook_system_predict(tmp_path):
     callback = HookedCallback(called)
     batches = 2
     trainer = Trainer(
-        default_root_dir=tmp_path, limit_predict_batches=batches, enable_progress_bar=False, callbacks=[callback]
+        devices=1,
+        default_root_dir=tmp_path,
+        limit_predict_batches=batches,
+        enable_progress_bar=False, callbacks=[callback]
     )
     trainer.predict(model)
     expected = [
@@ -797,7 +810,7 @@ def predict_dataloader(self):
 
     model = CustomBoringModel()
 
-    trainer = Trainer(default_root_dir=tmp_path, fast_dev_run=5)
+    trainer = Trainer(devices=1, default_root_dir=tmp_path, fast_dev_run=5)
     trainer.fit(model)
     trainer.test(model)
@@ -812,6 +825,7 @@ def test_trainer_datamodule_hook_system(tmp_path):
     model = BoringModel()
     batches = 2
     trainer = Trainer(
+        devices=1,
         default_root_dir=tmp_path,
         max_epochs=1,
         limit_train_batches=batches,
@@ -887,7 +901,7 @@ class CustomHookedModel(HookedModel):
assert is_overridden("configure_model", model) == override_configure_model datamodule = CustomHookedDataModule(ldm_called) - trainer = Trainer() + trainer = Trainer(devices=1) trainer.strategy.connect(model) trainer._data_connector.attach_data(model, datamodule=datamodule) ckpt_path = str(tmp_path / "file.ckpt") @@ -960,6 +974,7 @@ def predict_step(self, *args, **kwargs): model = MixedTrainModeModule() trainer = Trainer( + devices=1, default_root_dir=tmp_path, max_epochs=1, val_check_interval=1, From 70514cb7a1f9cd6e6d903e2eb4a0949871780dda Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 09:54:08 +0000 Subject: [PATCH 2/2] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- tests/tests_pytorch/models/test_hooks.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/tests/tests_pytorch/models/test_hooks.py b/tests/tests_pytorch/models/test_hooks.py index 5fde5b1269467..1a8aeb4b297a9 100644 --- a/tests/tests_pytorch/models/test_hooks.py +++ b/tests/tests_pytorch/models/test_hooks.py @@ -61,12 +61,7 @@ def on_before_zero_grad(self, optimizer): model = CurrentTestModel() - trainer = Trainer( - devices=1, - default_root_dir=tmp_path, - max_steps=max_steps, - max_epochs=2 - ) + trainer = Trainer(devices=1, default_root_dir=tmp_path, max_steps=max_steps, max_epochs=2) assert model.on_before_zero_grad_called == 0 trainer.fit(model) assert max_steps == model.on_before_zero_grad_called @@ -744,7 +739,8 @@ def test_trainer_model_hook_system_predict(tmp_path): devices=1, default_root_dir=tmp_path, limit_predict_batches=batches, - enable_progress_bar=False, callbacks=[callback] + enable_progress_bar=False, + callbacks=[callback], ) trainer.predict(model) expected = [