
Commit e56b8d7

update
1 parent 1d6aa8a commit e56b8d7

12 files changed: +85 -489 lines changed

tests/lora/test_lora_layers_auraflow.py

Lines changed: 0 additions & 12 deletions
@@ -103,18 +103,6 @@ def get_dummy_inputs(self, with_generator=True):
 
         return noise, input_ids, pipeline_inputs
 
-    @unittest.skip("Not supported in AuraFlow.")
-    def test_simple_inference_with_text_denoiser_block_scale(self):
-        pass
-
-    @unittest.skip("Not supported in AuraFlow.")
-    def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
-        pass
-
     @unittest.skip("Not supported in AuraFlow.")
     def test_modify_padding_mode(self):
         pass
-
-    @unittest.skip("Text encoder LoRA is not supported in AuraFlow.")
-    def test_simple_inference_with_partial_text_lora(self):
-        pass

tests/lora/test_lora_layers_cogvideox.py

Lines changed: 1 addition & 15 deletions
@@ -149,22 +149,8 @@ def test_group_offloading_inference_denoiser(self, offload_type, use_stream):
         # The reason for this can be found here: https://github.com/huggingface/diffusers/pull/11804#issuecomment-3013325338
         super()._test_group_offloading_inference_denoiser(offload_type, use_stream)
 
-    @unittest.skip("Not supported in CogVideoX.")
-    def test_simple_inference_with_text_denoiser_block_scale(self):
-        pass
-
-    @unittest.skip("Not supported in CogVideoX.")
-    def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
-        pass
-
     @unittest.skip("Not supported in CogVideoX.")
     def test_modify_padding_mode(self):
         pass
 
-    @unittest.skip("Text encoder LoRA is not supported in CogVideoX.")
-    def test_simple_inference_with_partial_text_lora(self):
-        pass
-
-    @unittest.skip("Not supported in CogVideoX.")
-    def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self):
-        pass
+    # TODO: skip them properly

tests/lora/test_lora_layers_cogview4.py

Lines changed: 0 additions & 12 deletions
@@ -136,18 +136,6 @@ def test_group_offloading_inference_denoiser(self, offload_type, use_stream):
         # The reason for this can be found here: https://github.com/huggingface/diffusers/pull/11804#issuecomment-3013325338
         super()._test_group_offloading_inference_denoiser(offload_type, use_stream)
 
-    @unittest.skip("Not supported in CogView4.")
-    def test_simple_inference_with_text_denoiser_block_scale(self):
-        pass
-
-    @unittest.skip("Not supported in CogView4.")
-    def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
-        pass
-
     @unittest.skip("Not supported in CogView4.")
     def test_modify_padding_mode(self):
         pass
-
-    @unittest.skip("Text encoder LoRA is not supported in CogView4.")
-    def test_simple_inference_with_partial_text_lora(self):
-        pass

tests/lora/test_lora_layers_flux.py

Lines changed: 2 additions & 22 deletions
@@ -263,21 +263,11 @@ def test_lora_expansion_works_for_extra_keys(self):
             "LoRA should lead to different results.",
         )
 
-    @unittest.skip("Not supported in Flux.")
-    def test_simple_inference_with_text_denoiser_block_scale(self):
-        pass
-
-    @unittest.skip("Not supported in Flux.")
-    def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
-        pass
-
     @unittest.skip("Not supported in Flux.")
     def test_modify_padding_mode(self):
         pass
 
-    @unittest.skip("Not supported in Flux.")
-    def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self):
-        pass
+    # TODO: skip them properly
 
 
 class FluxControlLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):

@@ -791,21 +781,11 @@ def test_lora_unload_with_parameter_expanded_shapes_and_no_reset(self):
         self.assertTrue(pipe.transformer.x_embedder.weight.data.shape[1] == in_features * 2)
         self.assertTrue(pipe.transformer.config.in_channels == in_features * 2)
 
-    @unittest.skip("Not supported in Flux.")
-    def test_simple_inference_with_text_denoiser_block_scale(self):
-        pass
-
-    @unittest.skip("Not supported in Flux.")
-    def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
-        pass
-
     @unittest.skip("Not supported in Flux.")
     def test_modify_padding_mode(self):
         pass
 
-    @unittest.skip("Not supported in Flux.")
-    def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self):
-        pass
+    # TODO: skip them properly
 
 
 @slow

tests/lora/test_lora_layers_hunyuanvideo.py

Lines changed: 0 additions & 12 deletions
@@ -170,22 +170,10 @@ def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
     def test_lora_actions(self, action, components_to_add):
         super().test_lora_actions(action, components_to_add, expected_atol=9e-3)
 
-    @unittest.skip("Not supported in HunyuanVideo.")
-    def test_simple_inference_with_text_denoiser_block_scale(self):
-        pass
-
-    @unittest.skip("Not supported in HunyuanVideo.")
-    def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
-        pass
-
     @unittest.skip("Not supported in HunyuanVideo.")
     def test_modify_padding_mode(self):
         pass
 
-    @unittest.skip("Text encoder LoRA is not supported in HunyuanVideo.")
-    def test_simple_inference_with_partial_text_lora(self):
-        pass
-
 
 @nightly
 @require_torch_accelerator

tests/lora/test_lora_layers_ltx_video.py

Lines changed: 0 additions & 12 deletions
@@ -128,18 +128,6 @@ def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
     def test_lora_actions(self, action, components_to_add):
         super().test_lora_actions(action, components_to_add, expected_atol=9e-3)
 
-    @unittest.skip("Not supported in LTXVideo.")
-    def test_simple_inference_with_text_denoiser_block_scale(self):
-        pass
-
-    @unittest.skip("Not supported in LTXVideo.")
-    def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
-        pass
-
     @unittest.skip("Not supported in LTXVideo.")
     def test_modify_padding_mode(self):
         pass
-
-    @unittest.skip("Text encoder LoRA is not supported in LTXVideo.")
-    def test_simple_inference_with_partial_text_lora(self):
-        pass

tests/lora/test_lora_layers_lumina2.py

Lines changed: 0 additions & 12 deletions
@@ -101,22 +101,10 @@ def get_dummy_inputs(self, with_generator=True):
 
         return noise, input_ids, pipeline_inputs
 
-    @unittest.skip("Not supported in Lumina2.")
-    def test_simple_inference_with_text_denoiser_block_scale(self):
-        pass
-
-    @unittest.skip("Not supported in Lumina2.")
-    def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
-        pass
-
     @unittest.skip("Not supported in Lumina2.")
     def test_modify_padding_mode(self):
         pass
 
-
-    @unittest.skip("Text encoder LoRA is not supported in Lumina2.")
-    def test_simple_inference_with_partial_text_lora(self):
-        pass
     @unittest.skip("Text encoder LoRA is not supported in Lumina2.")
     @skip_mps
     @pytest.mark.xfail(

tests/lora/test_lora_layers_mochi.py

Lines changed: 1 addition & 15 deletions
@@ -115,22 +115,8 @@ def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
     def test_lora_actions(self, action, components_to_add):
         super().test_lora_actions(action, components_to_add, expected_atol=9e-3)
 
-    @unittest.skip("Not supported in Mochi.")
-    def test_simple_inference_with_text_denoiser_block_scale(self):
-        pass
-
-    @unittest.skip("Not supported in Mochi.")
-    def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
-        pass
-
     @unittest.skip("Not supported in Mochi.")
     def test_modify_padding_mode(self):
         pass
 
-    @unittest.skip("Text encoder LoRA is not supported in Mochi.")
-    def test_simple_inference_with_partial_text_lora(self):
-        pass
-
-    @unittest.skip("Not supported in Mochi.")
-    def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self):
-        pass
+    # TODO: skip them properly

tests/lora/test_lora_layers_sana.py

Lines changed: 0 additions & 12 deletions
@@ -108,15 +108,3 @@ def get_dummy_inputs(self, with_generator=True):
     @unittest.skip("Not supported in SANA.")
     def test_modify_padding_mode(self):
         pass
-
-    @unittest.skip("Not supported in SANA.")
-    def test_simple_inference_with_text_denoiser_block_scale(self):
-        pass
-
-    @unittest.skip("Not supported in SANA.")
-    def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
-        pass
-
-    @unittest.skip("Text encoder LoRA is not supported in SANA.")
-    def test_simple_inference_with_partial_text_lora(self):
-        pass

tests/lora/test_lora_layers_sd3.py

Lines changed: 1 addition & 11 deletions
@@ -114,17 +114,7 @@ def test_sd3_lora(self):
         lora_filename = "lora_peft_format.safetensors"
         pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)
 
-    @unittest.skip("Not supported in SD3.")
-    def test_simple_inference_with_text_denoiser_block_scale(self):
-        pass
-
-    @unittest.skip("Not supported in SD3.")
-    def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self):
-        pass
-
-    @unittest.skip("Not supported in SD3.")
-    def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
-        pass
+    # TODO: skip them properly
 
     @unittest.skip("Not supported in SD3.")
     def test_modify_padding_mode(self):
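The recurring "# TODO: skip them properly" marker suggests the per-pipeline skip stubs removed in this commit are meant to be replaced by a centralized skip in the shared test mixin. Below is a minimal, hypothetical sketch of one way that could look, assuming a capability flag (here named supports_block_scale) that is not part of this commit or of the actual diffusers test utilities:

# Hypothetical sketch, not the actual diffusers implementation: gate the shared
# test on a class-level capability flag instead of per-pipeline stub overrides.
import unittest

import pytest


class PeftLoraLoaderMixinTests:
    # Hypothetical flag; pipelines without block-wise LoRA scaling set it to False.
    supports_block_scale = True

    def test_simple_inference_with_text_denoiser_block_scale(self):
        if not self.supports_block_scale:
            pytest.skip("Block-wise LoRA scaling is not supported for this pipeline.")
        # shared test body would exercise block-scale inference here


class AuraFlowLoRATests(PeftLoraLoaderMixinTests, unittest.TestCase):
    supports_block_scale = False  # opts out once; no stub methods needed

With this pattern, each test file only declares which features its pipeline supports, and the skip logic lives in one place.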

0 commit comments
