Skip to content

Commit 8c004ea

Browse files
committed
updates
1 parent b148bab commit 8c004ea

23 files changed

+173
-8
lines changed

tests/pipelines/animatediff/test_animatediff.py

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -547,6 +547,14 @@ def test_xformers_attention_forwardGenerator_pass(self):
547547
def test_vae_slicing(self):
548548
return super().test_vae_slicing(image_count=2)
549549

550+
def test_encode_prompt_works_in_isolation(self):
551+
extra_required_param_value_dict = {
552+
"device": torch.device(torch_device).type,
553+
"num_images_per_prompt": 1,
554+
"do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0,
555+
}
556+
return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict)
557+
550558

551559
@slow
552560
@require_torch_accelerator

tests/pipelines/animatediff/test_animatediff_controlnet.py

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -517,3 +517,11 @@ def test_vae_slicing(self, video_count=2):
517517
output_2 = pipe(**inputs)
518518

519519
assert np.abs(output_2[0].flatten() - output_1[0].flatten()).max() < 1e-2
520+
521+
def test_encode_prompt_works_in_isolation(self):
522+
extra_required_param_value_dict = {
523+
"device": torch.device(torch_device).type,
524+
"num_images_per_prompt": 1,
525+
"do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0,
526+
}
527+
return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict)

tests/pipelines/animatediff/test_animatediff_sdxl.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -305,3 +305,7 @@ def test_xformers_attention_forwardGenerator_pass(self):
305305

306306
max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()
307307
self.assertLess(max_diff, 1e-4, "XFormers attention should not affect the inference results")
308+
309+
@unittest.skip("Test currently not supported.")
310+
def test_encode_prompt_works_in_isolation(self):
311+
pass

tests/pipelines/animatediff/test_animatediff_sparsectrl.py

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -484,3 +484,11 @@ def test_free_init_with_schedulers(self):
484484

485485
def test_vae_slicing(self):
486486
return super().test_vae_slicing(image_count=2)
487+
488+
def test_encode_prompt_works_in_isolation(self):
489+
extra_required_param_value_dict = {
490+
"device": torch.device(torch_device).type,
491+
"num_images_per_prompt": 1,
492+
"do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0,
493+
}
494+
return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict)

tests/pipelines/animatediff/test_animatediff_video2video.py

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -544,3 +544,11 @@ def test_free_noise_multi_prompt(self):
544544
inputs["strength"] = 0.5
545545
inputs["prompt"] = {0: "Caterpillar on a leaf", 10: "Butterfly on a leaf", 42: "Error on a leaf"}
546546
pipe(**inputs).frames[0]
547+
548+
def test_encode_prompt_works_in_isolation(self):
549+
extra_required_param_value_dict = {
550+
"device": torch.device(torch_device).type,
551+
"num_images_per_prompt": 1,
552+
"do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0,
553+
}
554+
return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict)

tests/pipelines/animatediff/test_animatediff_video2video_controlnet.py

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -533,3 +533,11 @@ def test_free_noise_multi_prompt(self):
533533
inputs["strength"] = 0.5
534534
inputs["prompt"] = {0: "Caterpillar on a leaf", 10: "Butterfly on a leaf", 42: "Error on a leaf"}
535535
pipe(**inputs).frames[0]
536+
537+
def test_encode_prompt_works_in_isolation(self):
538+
extra_required_param_value_dict = {
539+
"device": torch.device(torch_device).type,
540+
"num_images_per_prompt": 1,
541+
"do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0,
542+
}
543+
return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict)

tests/pipelines/blipdiffusion/test_blipdiffusion.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -198,3 +198,7 @@ def test_blipdiffusion(self):
198198
assert (
199199
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
200200
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
201+
202+
@unittest.skip("Test not supported because of complexities in deriving query_embeds.")
203+
def test_encode_prompt_works_in_isolation(self):
204+
pass

tests/pipelines/controlnet/test_controlnet.py

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -287,6 +287,13 @@ def test_controlnet_lcm_custom_timesteps(self):
287287

288288
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
289289

290+
def test_encode_prompt_works_in_isolation(self):
291+
extra_required_param_value_dict = {
292+
"device": torch.device(torch_device).type,
293+
"do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0,
294+
}
295+
return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict)
296+
290297

291298
class StableDiffusionMultiControlNetPipelineFastTests(
292299
IPAdapterTesterMixin, PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
@@ -521,6 +528,13 @@ def test_inference_multiple_prompt_input(self):
521528

522529
assert image.shape == (4, 64, 64, 3)
523530

531+
def test_encode_prompt_works_in_isolation(self):
532+
extra_required_param_value_dict = {
533+
"device": torch.device(torch_device).type,
534+
"do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0,
535+
}
536+
return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict)
537+
524538

525539
class StableDiffusionMultiControlNetOneModelPipelineFastTests(
526540
IPAdapterTesterMixin, PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
@@ -706,6 +720,13 @@ def test_save_pretrained_raise_not_implemented_exception(self):
706720
except NotImplementedError:
707721
pass
708722

723+
def test_encode_prompt_works_in_isolation(self):
724+
extra_required_param_value_dict = {
725+
"device": torch.device(torch_device).type,
726+
"do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0,
727+
}
728+
return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict)
729+
709730

710731
@slow
711732
@require_torch_accelerator

tests/pipelines/controlnet/test_controlnet_blip_diffusion.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -222,3 +222,7 @@ def test_blipdiffusion_controlnet(self):
222222
assert (
223223
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
224224
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
225+
226+
@unittest.skip("Test not supported because of complexities in deriving query_embeds.")
227+
def test_encode_prompt_works_in_isolation(self):
228+
pass

tests/pipelines/controlnet/test_controlnet_img2img.py

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -189,6 +189,13 @@ def test_xformers_attention_forwardGenerator_pass(self):
189189
def test_inference_batch_single_identical(self):
190190
self._test_inference_batch_single_identical(expected_max_diff=2e-3)
191191

192+
def test_encode_prompt_works_in_isolation(self):
193+
extra_required_param_value_dict = {
194+
"device": torch.device(torch_device).type,
195+
"do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0,
196+
}
197+
return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict)
198+
192199

193200
class StableDiffusionMultiControlNetPipelineFastTests(
194201
IPAdapterTesterMixin, PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
@@ -391,6 +398,13 @@ def test_save_pretrained_raise_not_implemented_exception(self):
391398
except NotImplementedError:
392399
pass
393400

401+
def test_encode_prompt_works_in_isolation(self):
402+
extra_required_param_value_dict = {
403+
"device": torch.device(torch_device).type,
404+
"do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0,
405+
}
406+
return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict)
407+
394408

395409
@slow
396410
@require_torch_accelerator

0 commit comments

Comments (0)