Skip to content

Commit 810726c

Browse files
committed
update
1 parent f86ccac commit 810726c

8 files changed

+142
-260
lines changed

tests/lora/test_lora_layers_cogvideox.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -120,8 +120,9 @@ def get_dummy_inputs(self, with_generator=True):
 
         return noise, input_ids, pipeline_inputs
 
-    def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
-        super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)
+    @parameterized.expand([("simple",), ("weighted",), ("block_lora",), ("delete_adapter",)])
+    def test_lora_set_adapters_scenarios(self, scenario):
+        super()._test_lora_set_adapters_scenarios(scenario, expected_atol=9e-3)
 
     @parameterized.expand(
         [
@@ -137,7 +138,7 @@ def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
         ]
     )
     def test_lora_actions(self, action, components_to_add):
-        super().test_lora_actions(action, components_to_add, expected_atol=9e-3)
+        super()._test_lora_actions(action, components_to_add, expected_atol=9e-3)
 
     def test_lora_scale_kwargs_match_fusion(self):
         super().test_lora_scale_kwargs_match_fusion(expected_atol=9e-3, expected_rtol=9e-3)

tests/lora/test_lora_layers_cogview4.py

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -110,9 +110,6 @@ def get_dummy_inputs(self, with_generator=True):
 
         return noise, input_ids, pipeline_inputs
 
-    def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
-        super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)
-
     @parameterized.expand(
         [
             # Test actions on text_encoder LoRA only
@@ -127,7 +124,7 @@ def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
         ]
     )
     def test_lora_actions(self, action, components_to_add):
-        super().test_lora_actions(action, components_to_add, expected_atol=9e-3)
+        super()._test_lora_actions(action, components_to_add, expected_atol=9e-3)
 
     @parameterized.expand([("block_level", True), ("leaf_level", False)])
     @require_torch_accelerator

tests/lora/test_lora_layers_hunyuanvideo.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -151,8 +151,9 @@ def get_dummy_inputs(self, with_generator=True):
 
         return noise, input_ids, pipeline_inputs
 
-    def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
-        super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)
+    @parameterized.expand([("simple",), ("weighted",), ("block_lora",), ("delete_adapter",)])
+    def test_lora_set_adapters_scenarios(self, scenario):
+        super()._test_lora_set_adapters_scenarios(scenario, expected_atol=9e-3, expected_rtol=9e-3)
 
     @parameterized.expand(
         [
@@ -168,7 +169,7 @@ def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
         ]
     )
     def test_lora_actions(self, action, components_to_add):
-        super().test_lora_actions(action, components_to_add, expected_atol=9e-3)
+        super()._test_lora_actions(action, components_to_add, expected_atol=9e-3)
 
     @unittest.skip("Not supported in HunyuanVideo.")
     def test_modify_padding_mode(self):

tests/lora/test_lora_layers_ltx_video.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -109,8 +109,9 @@ def get_dummy_inputs(self, with_generator=True):
 
         return noise, input_ids, pipeline_inputs
 
-    def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
-        super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)
+    @parameterized.expand([("simple",), ("weighted",), ("block_lora",), ("delete_adapter",)])
+    def test_lora_set_adapters_scenarios(self, scenario):
+        super()._test_lora_set_adapters_scenarios(scenario, expected_atol=9e-3)
 
     @parameterized.expand(
         [
@@ -126,7 +127,7 @@ def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
         ]
     )
     def test_lora_actions(self, action, components_to_add):
-        super().test_lora_actions(action, components_to_add, expected_atol=9e-3)
+        super()._test_lora_actions(action, components_to_add, expected_atol=9e-3)
 
     @unittest.skip("Not supported in LTXVideo.")
     def test_modify_padding_mode(self):

tests/lora/test_lora_layers_mochi.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -96,8 +96,9 @@ def get_dummy_inputs(self, with_generator=True):
 
         return noise, input_ids, pipeline_inputs
 
-    def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
-        super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)
+    @parameterized.expand([("simple",), ("weighted",), ("block_lora",), ("delete_adapter",)])
+    def test_lora_set_adapters_scenarios(self, scenario):
+        super()._test_lora_set_adapters_scenarios(scenario, expected_atol=9e-3)
 
     @parameterized.expand(
         [
@@ -113,7 +114,7 @@ def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
         ]
     )
     def test_lora_actions(self, action, components_to_add):
-        super().test_lora_actions(action, components_to_add, expected_atol=9e-3)
+        super()._test_lora_actions(action, components_to_add, expected_atol=9e-3)
 
     @unittest.skip("Not supported in Mochi.")
     def test_modify_padding_mode(self):

tests/lora/test_lora_layers_sdxl.py

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -139,18 +139,19 @@ def test_lora_actions(self, action, components_to_add):
             expected_atol = 1e-3
             expected_rtol = 1e-3
 
-        super().test_lora_actions(expected_atol=expected_atol, expected_rtol=expected_rtol)
+        super()._test_lora_actions(action, components_to_add, expected_atol=expected_atol, expected_rtol=expected_rtol)
 
-    def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
+    @parameterized.expand([("simple",), ("weighted",), ("block_lora",), ("delete_adapter",), ("fused_multi",)])
+    def test_lora_set_adapters_scenarios(self, scenario):
         if torch.cuda.is_available():
             expected_atol = 9e-2
             expected_rtol = 9e-2
         else:
             expected_atol = 1e-3
             expected_rtol = 1e-3
 
-        super().test_simple_inference_with_text_lora_denoiser_fused_multi(
-            expected_atol=expected_atol, expected_rtol=expected_rtol
+        super()._test_lora_set_adapters_scenarios(
+            scenario=scenario, expected_atol=expected_atol, expected_rtol=expected_rtol
         )
 
     def test_lora_scale_kwargs_match_fusion(self):

tests/lora/test_lora_layers_wan.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -105,8 +105,9 @@ def get_dummy_inputs(self, with_generator=True):
 
         return noise, input_ids, pipeline_inputs
 
-    def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
-        super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)
+    @parameterized.expand([("simple",), ("weighted",), ("block_lora",), ("delete_adapter",)])
+    def test_lora_set_adapters_scenarios(self, scenario):
+        super()._test_lora_set_adapters_scenarios(scenario, expected_atol=9e-3)
 
     @parameterized.expand(
         [
@@ -122,7 +123,7 @@ def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
         ]
     )
     def test_lora_actions(self, action, components_to_add):
-        super().test_lora_actions(action, components_to_add, expected_atol=9e-3)
+        super()._test_lora_actions(action, components_to_add, expected_atol=9e-3)
 
     @unittest.skip("Not supported in Wan.")
     def test_modify_padding_mode(self):

0 commit comments

Comments
 (0)