Commit 119a8ed (parent bd1da66)

Reviewer feedback: Move decorators to test classes
... instead of having them on each test method.
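
The change relies on standard unittest behavior: a skip-style decorator applied to a unittest.TestCase subclass gates every test method in that class, so repeating the same decorators on each method is redundant. A minimal sketch of the pattern, using unittest.skipUnless as a stand-in for the project's own decorators such as @slow and @require_torch_2 (how those are actually implemented is assumed here; only the class-level usage is illustrated):

import os
import unittest

# Stand-in for decorators like @slow or @require_torch_2: a plain unittest
# skip decorator keyed off a hypothetical environment variable.
requires_accelerator = unittest.skipUnless(
    os.environ.get("RUN_ACCELERATOR_TESTS") == "1", "accelerator tests disabled"
)


@requires_accelerator  # applied once to the class, it gates every test method below
class HotswapSmokeTests(unittest.TestCase):
    def test_small_to_large(self):
        self.assertTrue(True)

    def test_large_to_small(self):
        self.assertTrue(True)


if __name__ == "__main__":
    unittest.main()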

2 files changed: +10 additions, −32 deletions


tests/models/test_modeling_common.py

Lines changed: 5 additions & 16 deletions
@@ -1525,6 +1525,11 @@ def test_push_to_hub_library_name(self):
         delete_repo(self.repo_id, token=TOKEN)


+@slow
+@require_torch_2
+@require_torch_accelerator
+@require_peft_backend
+@is_torch_compile
 class TestLoraHotSwappingForModel(unittest.TestCase):
     """Test that hotswapping does not result in recompilation on the model directly.
@@ -1667,42 +1672,26 @@ def check_model_hotswap(self, do_compile, rank0, rank1, target_modules):
             unet.load_lora_adapter(file_name1, adapter_name=name, hotswap=True)

     @parameterized.expand([(11, 11), (7, 13), (13, 7)])  # important to test small to large and vice versa
-    @slow
-    @require_torch_2
-    @require_torch_accelerator
-    @require_peft_backend
     def test_hotswapping_model(self, rank0, rank1):
         self.check_model_hotswap(
             do_compile=False, rank0=rank0, rank1=rank1, target_modules=["to_q", "to_k", "to_v", "to_out.0"]
         )

     @parameterized.expand([(11, 11), (7, 13), (13, 7)])  # important to test small to large and vice versa
-    @slow
-    @require_torch_2
-    @require_torch_accelerator
-    @require_peft_backend
     def test_hotswapping_compiled_model_linear(self, rank0, rank1):
         # It's important to add this context to raise an error on recompilation
         target_modules = ["to_q", "to_k", "to_v", "to_out.0"]
         with torch._dynamo.config.patch(error_on_recompile=True):
             self.check_model_hotswap(do_compile=True, rank0=rank0, rank1=rank1, target_modules=target_modules)

     @parameterized.expand([(11, 11), (7, 13), (13, 7)])  # important to test small to large and vice versa
-    @slow
-    @require_torch_2
-    @require_torch_accelerator
-    @require_peft_backend
     def test_hotswapping_compiled_model_conv2d(self, rank0, rank1):
         # It's important to add this context to raise an error on recompilation
         target_modules = ["conv", "conv1", "conv2"]
         with torch._dynamo.config.patch(error_on_recompile=True):
             self.check_model_hotswap(do_compile=True, rank0=rank0, rank1=rank1, target_modules=target_modules)

     @parameterized.expand([(11, 11), (7, 13), (13, 7)])  # important to test small to large and vice versa
-    @slow
-    @require_torch_2
-    @require_torch_accelerator
-    @require_peft_backend
     def test_hotswapping_compiled_model_both_linear_and_conv2d(self, rank0, rank1):
         # It's important to add this context to raise an error on recompilation
         target_modules = ["to_q", "conv"]
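
The compiled-model tests above wrap the hotswap in torch._dynamo.config.patch(error_on_recompile=True), so any recompilation of the already-compiled module fails loudly instead of passing silently. A minimal, self-contained sketch of that guard (the toy function and input shapes are illustrative, not taken from the test suite):

import torch


def double(x):
    return x * 2


compiled = torch.compile(double)
compiled(torch.randn(4))  # first call triggers compilation for this input

# Inside this context, any further recompilation raises instead of proceeding,
# which is how the tests assert that swapping LoRA weights reuses the compiled graph.
with torch._dynamo.config.patch(error_on_recompile=True):
    compiled(torch.randn(4))    # same shape: the cached graph is reused
    # compiled(torch.randn(8))  # a shape change here would typically recompile and raise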

tests/pipelines/test_pipelines.py

Lines changed: 5 additions & 16 deletions
@@ -2178,6 +2178,11 @@ def test_ddpm_ddim_equality_batched(self):
         assert np.abs(ddpm_images - ddim_images).max() < 1e-1


+@slow
+@require_torch_2
+@require_torch_accelerator
+@require_peft_backend
+@is_torch_compile
 class TestLoraHotSwappingForPipeline(unittest.TestCase):
     """Test that hotswapping does not result in recompilation in a pipeline.
@@ -2297,42 +2302,26 @@ def check_pipeline_hotswap(self, do_compile, rank0, rank1, target_modules):
         assert np.allclose(output1_before, output1_after, atol=tol, rtol=tol)

     @parameterized.expand([(11, 11), (7, 13), (13, 7)])  # important to test small to large and vice versa
-    @slow
-    @require_torch_2
-    @require_torch_accelerator
-    @require_peft_backend
     def test_hotswapping_pipeline(self, rank0, rank1):
         self.check_pipeline_hotswap(
             do_compile=False, rank0=rank0, rank1=rank1, target_modules=["to_q", "to_k", "to_v", "to_out.0"]
         )

     @parameterized.expand([(11, 11), (7, 13), (13, 7)])  # important to test small to large and vice versa
-    @slow
-    @require_torch_2
-    @require_torch_accelerator
-    @require_peft_backend
     def test_hotswapping_compiled_pipline_linear(self, rank0, rank1):
         # It's important to add this context to raise an error on recompilation
         target_modules = ["to_q", "to_k", "to_v", "to_out.0"]
         with torch._dynamo.config.patch(error_on_recompile=True):
             self.check_pipeline_hotswap(do_compile=True, rank0=rank0, rank1=rank1, target_modules=target_modules)

     @parameterized.expand([(11, 11), (7, 13), (13, 7)])  # important to test small to large and vice versa
-    @slow
-    @require_torch_2
-    @require_torch_accelerator
-    @require_peft_backend
     def test_hotswapping_compiled_pipline_conv2d(self, rank0, rank1):
         # It's important to add this context to raise an error on recompilation
         target_modules = ["conv", "conv1", "conv2"]
         with torch._dynamo.config.patch(error_on_recompile=True):
             self.check_pipeline_hotswap(do_compile=True, rank0=rank0, rank1=rank1, target_modules=target_modules)

     @parameterized.expand([(11, 11), (7, 13), (13, 7)])  # important to test small to large and vice versa
-    @slow
-    @require_torch_2
-    @require_torch_accelerator
-    @require_peft_backend
     def test_hotswapping_compiled_pipline_both_linear_and_conv2d(self, rank0, rank1):
         # It's important to add this context to raise an error on recompilation
         target_modules = ["to_q", "conv"]
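
All of the hotswapping tests are driven by @parameterized.expand, which generates one test method per argument tuple; the (11, 11), (7, 13), (13, 7) rank pairs therefore cover equal, growing, and shrinking LoRA ranks. A small sketch of that pattern (the test name and assertions are illustrative only):

import unittest

from parameterized import parameterized


class RankPairTests(unittest.TestCase):
    # Each tuple becomes its own generated test method, three tests in total here.
    @parameterized.expand([(11, 11), (7, 13), (13, 7)])
    def test_rank_pair(self, rank0, rank1):
        self.assertGreater(rank0, 0)
        self.assertGreater(rank1, 0)


if __name__ == "__main__":
    unittest.main()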
