
Commit 9b2e58d: "fixes"
1 parent: 6e613f9

5 files changed: +20 -37 lines

tests/pipelines/controlnet/test_controlnet_sdxl.py

Lines changed: 8 additions & 3 deletions
@@ -210,6 +210,10 @@ def test_xformers_attention_forwardGenerator_pass(self):
     def test_inference_batch_single_identical(self):
         self._test_inference_batch_single_identical(expected_max_diff=2e-3)
 
+    @unittest.skip("We test this functionality elsewhere already.")
+    def test_save_load_optional_components(self):
+        pass
+
     @require_torch_accelerator
     def test_stable_diffusion_xl_offloads(self):
         pipes = []
@@ -641,9 +645,6 @@ def test_xformers_attention_forwardGenerator_pass(self):
     def test_inference_batch_single_identical(self):
         self._test_inference_batch_single_identical(expected_max_diff=2e-3)
 
-    def test_save_load_optional_components(self):
-        return self._test_save_load_optional_components()
-
 
 class StableDiffusionXLMultiControlNetOneModelPipelineFastTests(
     PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
@@ -818,6 +819,10 @@ def test_control_guidance_switch(self):
     def test_attention_slicing_forward_pass(self):
         return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)
 
+    @unittest.skip("We test this functionality elsewhere already.")
+    def test_save_load_optional_components(self):
+        pass
+
     @unittest.skipIf(
         torch_device != "cuda" or not is_xformers_available(),
         reason="XFormers attention is only available with CUDA and `xformers` installed",

tests/pipelines/controlnet_xs/test_controlnetxs_sdxl.py

Lines changed: 4 additions & 0 deletions
@@ -199,6 +199,10 @@ def test_xformers_attention_forwardGenerator_pass(self):
     def test_inference_batch_single_identical(self):
         self._test_inference_batch_single_identical(expected_max_diff=2e-3)
 
+    @unittest.skip("We test this functionality elsewhere already.")
+    def test_save_load_optional_components(self):
+        pass
+
     @require_torch_accelerator
     # Copied from test_controlnet_sdxl.py
     def test_stable_diffusion_xl_offloads(self):

tests/pipelines/lumina2/test_pipeline_lumina2.py

Lines changed: 0 additions & 31 deletions
@@ -1,6 +1,5 @@
 import unittest
 
-import numpy as np
 import torch
 from transformers import AutoTokenizer, GemmaConfig, GemmaForCausalLM
 
@@ -10,7 +9,6 @@
     Lumina2Text2ImgPipeline,
     Lumina2Transformer2DModel,
 )
-from diffusers.utils.testing_utils import torch_device
 
 from ..test_pipelines_common import PipelineTesterMixin
 
@@ -116,32 +114,3 @@ def get_dummy_inputs(self, device, seed=0):
             "output_type": "np",
         }
         return inputs
-
-    def test_lumina_prompt_embeds(self):
-        pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
-        inputs = self.get_dummy_inputs(torch_device)
-
-        output_with_prompt = pipe(**inputs).images[0]
-
-        inputs = self.get_dummy_inputs(torch_device)
-        prompt = inputs.pop("prompt")
-
-        do_classifier_free_guidance = inputs["guidance_scale"] > 1
-        (
-            prompt_embeds,
-            prompt_attention_mask,
-            negative_prompt_embeds,
-            negative_prompt_attention_mask,
-        ) = pipe.encode_prompt(
-            prompt,
-            do_classifier_free_guidance=do_classifier_free_guidance,
-            device=torch_device,
-        )
-        output_with_embeds = pipe(
-            prompt_embeds=prompt_embeds,
-            prompt_attention_mask=prompt_attention_mask,
-            **inputs,
-        ).images[0]
-
-        max_diff = np.abs(output_with_prompt - output_with_embeds).max()
-        assert max_diff < 1e-4
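
The deleted test_lumina_prompt_embeds verified that generating from a raw prompt and from precomputed prompt embeddings yields matching images, using an elementwise max-difference check. A self-contained sketch of that comparison idiom, with synthetic arrays standing in for the two pipeline outputs:

import numpy as np

# Hypothetical outputs: one image generated from a raw prompt, one from
# precomputed embeddings; here we fabricate nearly identical arrays.
rng = np.random.default_rng(0)
output_with_prompt = rng.random((64, 64, 3), dtype=np.float32)
output_with_embeds = output_with_prompt + 1e-5

# The check passes when the largest elementwise deviation stays below tolerance.
max_diff = np.abs(output_with_prompt - output_with_embeds).max()
assert max_diff < 1e-4, f"outputs diverge: max_diff={max_diff}"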

tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl.py

Lines changed: 4 additions & 0 deletions
@@ -294,6 +294,10 @@ def test_stable_diffusion_xl_offloads(self):
         assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
         assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3
 
+    @unittest.skip("We test this functionality elsewhere already.")
+    def test_save_load_optional_components(self):
+        pass
+
     def test_stable_diffusion_two_xl_mixture_of_denoiser_fast(self):
         components = self.get_dummy_components()
         pipe_1 = StableDiffusionXLPipeline(**components).to(torch_device)

tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py

Lines changed: 4 additions & 3 deletions
@@ -370,9 +370,6 @@ def test_total_downscale_factor(self, adapter_type):
             expected_out_image_size,
         )
 
-    def test_save_load_optional_components(self):
-        return self._test_save_load_optional_components()
-
     def test_adapter_sdxl_lcm(self):
         device = "cpu"  # ensure determinism for the device-dependent torch.Generator
 
@@ -512,6 +509,10 @@ def test_inference_batch_consistent(
 
         logger.setLevel(level=diffusers.logging.WARNING)
 
+    @unittest.skip("We test this functionality elsewhere already.")
+    def test_save_load_optional_components(self):
+        pass
+
     def test_num_images_per_prompt(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
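
One practical property of the skip pattern used throughout this commit: unlike deleting the override outright, @unittest.skip keeps the test visible in reports together with its reason. A small runnable sketch (hypothetical class name):

import unittest


class AdapterFastTestsSketch(unittest.TestCase):
    @unittest.skip("We test this functionality elsewhere already.")
    def test_save_load_optional_components(self):
        pass


# Running the suite records a skip (with its reason) instead of a silent pass.
suite = unittest.defaultTestLoader.loadTestsFromTestCase(AdapterFastTestsSketch)
result = unittest.TextTestRunner(verbosity=0).run(suite)
print(result.skipped)  # [(<test>, 'We test this functionality elsewhere already.')]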
