
Commit ffe821c

Commit message: updates
1 parent 6734a12 · commit ffe821c
16 files changed · +92 −3 lines changed

tests/pipelines/audioldm2/test_audioldm2.py

Lines changed: 5 additions & 0 deletions

@@ -508,9 +508,14 @@ def test_to_dtype(self):
         model_dtypes = {key: component.dtype for key, component in components.items() if hasattr(component, "dtype")}
         self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes.values()))
 
+    @unittest.skip("Test not supported.")
     def test_sequential_cpu_offload_forward_pass(self):
         pass
 
+    @unittest.skip("Test not supported for now because of the use of `projection_model` in `encode_prompt()`.")
+    def test_encode_prompt_works_in_isolation(self):
+        pass
+
 
 @nightly
 class AudioLDM2PipelineSlowTests(unittest.TestCase):

tests/pipelines/cogview3/test_cogview3plus.py

Lines changed: 3 additions & 0 deletions

@@ -231,6 +231,9 @@ def test_attention_slicing_forward_pass(
             "Attention slicing should not affect the inference results",
         )
 
+    def test_encode_prompt_works_in_isolation(self):
+        return super().test_encode_prompt_works_in_isolation(atol=1e-3, rtol=1e-3)
+
 
 @slow
 @require_torch_accelerator
tests/pipelines/controlnet_xs/test_controlnetxs.py

Lines changed: 7 additions & 0 deletions

@@ -334,6 +334,13 @@ def test_to_device(self):
         output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
         self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)
 
+    def test_encode_prompt_works_in_isolation(self):
+        extra_required_param_value_dict = {
+            "device": torch.device(torch_device).type,
+            "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0,
+        }
+        return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict)
+
 
 @slow
 @require_torch_accelerator
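
The same extra_required_param_value_dict recurs in the latent consistency, PIA, and SAG overrides below. A standalone sketch of how its two recurring values are derived; torch_device and dummy_inputs here are stand-ins for the test suite's fixtures, not the suite's own code:

import torch

# Stand-in for the test suite's torch_device fixture.
torch_device = "cuda" if torch.cuda.is_available() else "cpu"
# Stand-in for get_dummy_inputs(); only guidance_scale matters for this sketch.
dummy_inputs = {"prompt": "a photo of a cat", "guidance_scale": 7.5}

extra_required_param_value_dict = {
    # The overrides pass the device *type* string ("cuda"/"cpu"), not a torch.device object.
    "device": torch.device(torch_device).type,
    # Classifier-free guidance is only active when guidance_scale > 1.0.
    "do_classifier_free_guidance": dummy_inputs.get("guidance_scale", 1.0) > 1.0,
}

print(extra_required_param_value_dict)
# e.g. {'device': 'cpu', 'do_classifier_free_guidance': True} on a CPU-only machine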

tests/pipelines/i2vgen_xl/test_i2vgenxl.py

Lines changed: 4 additions & 0 deletions

@@ -228,6 +228,10 @@ def test_num_videos_per_prompt(self):
 
         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
 
+    @unittest.skip("Test not supported for now.")
+    def test_encode_prompt_works_in_isolation(self):
+        pass
+
 
 @slow
 @require_torch_accelerator

tests/pipelines/latent_consistency_models/test_latent_consistency_models.py

Lines changed: 7 additions & 0 deletions

@@ -213,6 +213,13 @@ def callback_inputs_test(pipe, i, t, callback_kwargs):
         output = pipe(**inputs)[0]
         assert output.abs().sum() == 0
 
+    def test_encode_prompt_works_in_isolation(self):
+        extra_required_param_value_dict = {
+            "device": torch.device(torch_device).type,
+            "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0,
+        }
+        return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict)
+
 
 @slow
 @require_torch_gpu

tests/pipelines/latent_consistency_models/test_latent_consistency_models_img2img.py

Lines changed: 7 additions & 0 deletions

@@ -220,6 +220,13 @@ def callback_inputs_test(pipe, i, t, callback_kwargs):
         output = pipe(**inputs)[0]
         assert output.abs().sum() == 0
 
+    def test_encode_prompt_works_in_isolation(self):
+        extra_required_param_value_dict = {
+            "device": torch.device(torch_device).type,
+            "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0,
+        }
+        return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict)
+
 
 @slow
 @require_torch_gpu

tests/pipelines/pia/test_pia.py

Lines changed: 8 additions & 0 deletions

@@ -437,3 +437,11 @@ def test_xformers_attention_forwardGenerator_pass(self):
 
         max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()
         self.assertLess(max_diff, 1e-4, "XFormers attention should not affect the inference results")
+
+    def test_encode_prompt_works_in_isolation(self):
+        extra_required_param_value_dict = {
+            "device": torch.device(torch_device).type,
+            "num_images_per_prompt": 1,
+            "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0,
+        }
+        return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict)

tests/pipelines/stable_diffusion_gligen_text_image/test_stable_diffusion_gligen_text_image.py

Lines changed: 6 additions & 0 deletions

@@ -207,3 +207,9 @@ def test_attention_slicing_forward_pass(self):
 
     def test_inference_batch_single_identical(self):
         super().test_inference_batch_single_identical(batch_size=3, expected_max_diff=3e-3)
+
+    @unittest.skip(
+        "Test not supported because of the use of `text_encoder` in `get_cross_attention_kwargs_with_grounded()`."
+    )
+    def test_encode_prompt_works_in_isolation(self):
+        pass

tests/pipelines/stable_diffusion_sag/test_stable_diffusion_sag.py

Lines changed: 7 additions & 0 deletions

@@ -153,6 +153,13 @@ def test_pipeline_different_schedulers(self):
             # Karras schedulers are not supported
             image = pipeline(**inputs).images[0]
 
+    def test_encode_prompt_works_in_isolation(self):
+        extra_required_param_value_dict = {
+            "device": torch.device(torch_device).type,
+            "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0,
+        }
+        return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict)
+
 
 @nightly
 @require_torch_gpu

tests/pipelines/stable_unclip/test_stable_unclip_img2img.py

Lines changed: 4 additions & 0 deletions

@@ -207,6 +207,10 @@ def test_inference_batch_single_identical(self):
     def test_xformers_attention_forwardGenerator_pass(self):
         self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
 
+    @unittest.skip("Test not supported at the moment.")
+    def test_encode_prompt_works_in_isolation(self):
+        pass
+
 
 @nightly
 @require_torch_gpu
