
Commit 2e5728a

Update standalone tests (#12472)
1 parent: 939d56c

7 files changed (+143, -44 lines)

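All of the touched files apply the same migration: the deprecated gpus= Trainer argument is replaced by the equivalent accelerator="gpu" / devices= pair. A minimal before/after sketch of the pattern, assuming a typical PyTorch Lightning Trainer setup:

from pytorch_lightning import Trainer

# Before (deprecated form):
#   trainer = Trainer(gpus=2, strategy="ddp")

# After: accelerator selects the hardware type, devices the count.
trainer = Trainer(accelerator="gpu", devices=2, strategy="ddp")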

tests/callbacks/test_tqdm_progress_bar.py

Lines changed: 2 additions & 1 deletion
@@ -582,7 +582,8 @@ def test_progress_bar_max_val_check_interval(
         max_epochs=1,
         enable_model_summary=False,
         val_check_interval=val_check_interval,
-        gpus=world_size,
+        accelerator="gpu",
+        devices=world_size,
         strategy="ddp",
     )
     trainer.fit(model, train_dataloaders=train_data, val_dataloaders=val_data)

tests/strategies/test_ddp_fully_sharded_with_full_state_dict.py

Lines changed: 14 additions & 3 deletions
@@ -96,7 +96,9 @@ def test_fully_sharded_strategy_checkpoint(tmpdir):
     """Test to ensure that checkpoint is saved correctly when using a single GPU, and all stages can be run."""

     model = TestFSDPModel()
-    trainer = Trainer(default_root_dir=tmpdir, gpus=1, strategy="fsdp", precision=16, max_epochs=1)
+    trainer = Trainer(
+        default_root_dir=tmpdir, accelerator="gpu", devices=1, strategy="fsdp", precision=16, max_epochs=1
+    )
     _run_multiple_stages(trainer, model, os.path.join(tmpdir, "last.ckpt"))

@@ -106,7 +108,15 @@ def test_fully_sharded_strategy_checkpoint_multi_gpus(tmpdir):

     model = TestFSDPModel()
     ck = ModelCheckpoint(save_last=True)
-    trainer = Trainer(default_root_dir=tmpdir, gpus=2, strategy="fsdp", precision=16, max_epochs=1, callbacks=[ck])
+    trainer = Trainer(
+        default_root_dir=tmpdir,
+        accelerator="gpu",
+        devices=2,
+        strategy="fsdp",
+        precision=16,
+        max_epochs=1,
+        callbacks=[ck],
+    )
     _run_multiple_stages(trainer, model)

@@ -146,7 +156,8 @@ def test_fsdp_gradient_clipping_raises(tmpdir):
         default_root_dir=tmpdir,
         strategy="fsdp",
         fast_dev_run=True,
-        gpus=1,
+        accelerator="gpu",
+        devices=1,
         precision=16,
         gradient_clip_val=1,
         gradient_clip_algorithm="norm",

tests/strategies/test_ddp_strategy.py

Lines changed: 1 addition & 1 deletion
@@ -68,7 +68,7 @@ def test_ddp_barrier_non_consecutive_device_ids(barrier_mock, tmpdir):
     """Test correct usage of barriers when device ids do not start at 0 or are not consecutive."""
     model = BoringModel()
     gpus = [1, 3]
-    trainer = Trainer(default_root_dir=tmpdir, max_steps=1, gpus=gpus, strategy="ddp")
+    trainer = Trainer(default_root_dir=tmpdir, max_steps=1, accelerator="gpu", devices=gpus, strategy="ddp")
     trainer.fit(model)
     barrier_mock.assert_any_call(device_ids=[gpus[trainer.local_rank]])
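Note that devices accepts an explicit list of device indices as well as a count, which is why the non-consecutive selection gpus=[1, 3] carries over unchanged. A minimal sketch:

from pytorch_lightning import Trainer

# devices can be a list of GPU indices rather than a count,
# which is what lets this test pin ranks to GPUs 1 and 3.
trainer = Trainer(accelerator="gpu", devices=[1, 3], strategy="ddp")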

tests/strategies/test_ddp_strategy_with_comm_hook.py

Lines changed: 14 additions & 7 deletions
@@ -37,7 +37,8 @@ def test_ddp_fp16_compress_comm_hook(tmpdir):
     strategy = DDPStrategy(ddp_comm_hook=default.fp16_compress_hook)
     trainer = Trainer(
         max_epochs=1,
-        gpus=2,
+        accelerator="gpu",
+        devices=2,
         strategy=strategy,
         default_root_dir=tmpdir,
         sync_batchnorm=True,

@@ -60,7 +61,8 @@ def test_ddp_sgd_comm_hook(tmpdir):
     )
     trainer = Trainer(
         max_epochs=1,
-        gpus=2,
+        accelerator="gpu",
+        devices=2,
         strategy=strategy,
         default_root_dir=tmpdir,
         sync_batchnorm=True,

@@ -84,7 +86,8 @@ def test_ddp_fp16_compress_wrap_sgd_comm_hook(tmpdir):
     )
     trainer = Trainer(
         max_epochs=1,
-        gpus=2,
+        accelerator="gpu",
+        devices=2,
         strategy=strategy,
         default_root_dir=tmpdir,
         sync_batchnorm=True,

@@ -104,7 +107,8 @@ def test_ddp_spawn_fp16_compress_comm_hook(tmpdir):
     strategy = DDPSpawnStrategy(ddp_comm_hook=default.fp16_compress_hook)
     trainer = Trainer(
         max_epochs=1,
-        gpus=2,
+        accelerator="gpu",
+        devices=2,
         strategy=strategy,
         default_root_dir=tmpdir,
         sync_batchnorm=True,

@@ -130,7 +134,8 @@ def test_ddp_post_local_sgd_comm_hook(tmpdir):
     )
     trainer = Trainer(
         fast_dev_run=True,
-        gpus=2,
+        accelerator="gpu",
+        devices=2,
         strategy=strategy,
         default_root_dir=tmpdir,
         sync_batchnorm=True,

@@ -151,7 +156,8 @@ def test_post_local_sgd_model_averaging(average_parameters_mock, tmpdir):
     # test regular ddp does not call model averaging
     trainer = Trainer(
         fast_dev_run=True,
-        gpus=2,
+        accelerator="gpu",
+        devices=2,
         strategy="ddp",
         default_root_dir=tmpdir,
         sync_batchnorm=True,

@@ -207,7 +213,8 @@ def configure_optimizers(self):

     trainer = Trainer(
         fast_dev_run=True,
-        gpus=2,
+        accelerator="gpu",
+        devices=2,
         strategy=strategy,
         default_root_dir=tmpdir,
         sync_batchnorm=True,
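For reference, the configuration these tests exercise, written out with the new arguments. A minimal sketch, assuming the Lightning 1.6-era import paths used by this test file:

from pytorch_lightning import Trainer
from pytorch_lightning.strategies import DDPStrategy
from torch.distributed.algorithms.ddp_comm_hooks import default_hooks as default

# DDP strategy with the fp16 gradient-compression communication hook,
# selecting two GPUs via accelerator/devices instead of gpus=2.
strategy = DDPStrategy(ddp_comm_hook=default.fp16_compress_hook)
trainer = Trainer(max_epochs=1, accelerator="gpu", devices=2, strategy=strategy, sync_batchnorm=True)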
