Skip to content

Commit 00d3675

Browse files
carmocca authored and lexierule committed
Rename special to standalone (#10779)
1 parent 288d018 commit 00d3675

32 files changed

+94
-94
lines changed

.azure-pipelines/gpu-tests.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -72,10 +72,10 @@ jobs:
7272
displayName: 'Testing: standard'
7373
7474
- bash: |
75-
bash tests/special_tests.sh
75+
bash tests/standalone_tests.sh
7676
env:
7777
PL_USE_MOCKED_MNIST: "1"
78-
displayName: 'Testing: special'
78+
displayName: 'Testing: standalone'
7979
8080
- bash: |
8181
python -m coverage report

tests/accelerators/test_accelerator_connector.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -323,8 +323,8 @@ def on_fit_start(self, trainer, pl_module):
323323
trainer.fit(model)
324324

325325

326-
@RunIf(special=True)
327-
def test_accelerator_choice_ddp_cpu_and_plugin(tmpdir):
326+
@RunIf(skip_windows=True, standalone=True)
327+
def test_accelerator_choice_ddp_cpu_and_strategy(tmpdir):
328328
"""Test that accelerator="ddp_cpu" can work together with an instance of DDPPlugin."""
329329
_test_accelerator_choice_ddp_cpu_and_plugin(tmpdir, ddp_plugin_class=DDPPlugin)
330330

tests/accelerators/test_ddp.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -108,7 +108,7 @@ def setup(self, stage: Optional[str] = None) -> None:
108108
trainer.fit(model)
109109

110110

111-
@RunIf(min_gpus=2, min_torch="1.8.1", special=True)
111+
@RunIf(min_gpus=2, min_torch="1.8.1", standalone=True)
112112
@pytest.mark.parametrize("precision", (16, 32))
113113
def test_ddp_wrapper(tmpdir, precision):
114114
"""Test parameters to ignore are carried over for DDP."""

tests/accelerators/test_multi_nodes_gpu.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@
3131
# TODO(Borda): When multi-node tests are re-enabled (.github/workflows/ci_test-mnodes.yml)
3232
# use an environment variable `PL_RUNNING_MULTINODE_TESTS` and set `RunIf(multinode=True)`
3333
@pytest.mark.skip("Multi-node testing is currently disabled")
34-
@RunIf(special=True)
34+
@RunIf(standalone=True)
3535
def test_logging_sync_dist_true_ddp(tmpdir):
3636
"""Tests to ensure that the sync_dist flag works with CPU (should just return the original value)"""
3737
fake_result = 1
@@ -68,7 +68,7 @@ def validation_step(self, batch, batch_idx):
6868
# TODO(Borda): When multi-node tests are re-enabled (.github/workflows/ci_test-mnodes.yml)
6969
# use an environment variable `PL_RUNNING_MULTINODE_TESTS` and set `RunIf(multinode=True)`
7070
@pytest.mark.skip("Multi-node testing is currently disabled")
71-
@RunIf(special=True)
71+
@RunIf(standalone=True)
7272
def test__validation_step__log(tmpdir):
7373
"""Tests that validation_step can log."""
7474

tests/callbacks/test_pruning.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -160,7 +160,7 @@ def test_pruning_callback(
160160
)
161161

162162

163-
@RunIf(special=True, min_gpus=2)
163+
@RunIf(standalone=True, min_gpus=2)
164164
@pytest.mark.parametrize("parameters_to_prune", (False, True))
165165
@pytest.mark.parametrize("use_global_unstructured", (False, True))
166166
def test_pruning_callback_ddp(tmpdir, parameters_to_prune, use_global_unstructured):

tests/callbacks/test_stochastic_weight_avg.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -138,7 +138,7 @@ def train_with_swa(
138138
assert trainer.lightning_module == model
139139

140140

141-
@RunIf(min_gpus=2, special=True)
141+
@RunIf(min_gpus=2, standalone=True)
142142
def test_swa_callback_ddp(tmpdir):
143143
train_with_swa(tmpdir, strategy="ddp", gpus=2)
144144

tests/callbacks/test_tqdm_progress_bar.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -521,7 +521,7 @@ def test_tqdm_progress_bar_can_be_pickled():
521521
pickle.dumps(bar)
522522

523523

524-
@RunIf(min_gpus=2, special=True)
524+
@RunIf(min_gpus=2, standalone=True)
525525
@pytest.mark.parametrize(
526526
["total_train_samples", "train_batch_size", "total_val_samples", "val_batch_size", "val_check_interval"],
527527
[(8, 4, 2, 1, 0.2), (8, 4, 2, 1, 0.5)],

tests/checkpointing/test_checkpoint_callback_frequency.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -87,7 +87,7 @@ def training_step(self, batch, batch_idx):
8787

8888

8989
@mock.patch("torch.save")
90-
@RunIf(special=True, min_gpus=2)
90+
@RunIf(standalone=True, min_gpus=2)
9191
@pytest.mark.parametrize(["k", "epochs", "val_check_interval", "expected"], [(1, 1, 1.0, 1), (2, 2, 0.3, 4)])
9292
def test_top_k_ddp(save_mock, tmpdir, k, epochs, val_check_interval, expected):
9393
class TestModel(BoringModel):

tests/conftest.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -159,13 +159,13 @@ def single_process_pg():
159159

160160

161161
def pytest_collection_modifyitems(items):
162-
if os.getenv("PL_RUNNING_SPECIAL_TESTS", "0") != "1":
162+
if os.getenv("PL_RUN_STANDALONE_TESTS", "0") != "1":
163163
return
164-
# filter out non-special tests
164+
# filter out non-standalone tests
165165
items[:] = [
166166
item
167167
for item in items
168168
for marker in item.own_markers
169-
# has `@RunIf(special=True)`
170-
if marker.name == "skipif" and marker.kwargs.get("special")
169+
# has `@RunIf(standalone=True)`
170+
if marker.name == "skipif" and marker.kwargs.get("standalone")
171171
]

tests/core/test_metric_result_integration.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -482,7 +482,7 @@ def test_result_collection_reload_1_gpu_ddp(tmpdir):
482482
result_collection_reload(default_root_dir=tmpdir, strategy="ddp", gpus=1)
483483

484484

485-
@RunIf(min_gpus=2, special=True)
485+
@RunIf(min_gpus=2, standalone=True)
486486
@mock.patch.dict(os.environ, {"PL_FAULT_TOLERANT_TRAINING": "1"})
487487
@pytest.mark.skipif(not _TORCH_GREATER_EQUAL_1_7, reason="Requires at least PyTorch 1.7")
488488
def test_result_collection_reload_2_gpus(tmpdir):

0 commit comments

Comments (0)