Commit b1c9666

remove tests that directly leveraged encode_prompt() in one way or another.
1 parent: 76aaab0 · commit: b1c9666

12 files changed: 3 additions & 638 deletions
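Every test removed in this commit exercised the same round-trip: run the pipeline once with a plain prompt, encode that prompt manually with the pipeline's encode_prompt() helper, feed the returned embeddings back into the pipeline call, and assert that both paths produce the same image. A minimal sketch of the SDXL-style variant of that pattern follows; the checkpoint name, step count, and seed are illustrative assumptions, not part of this commit:

import numpy as np
import torch
from diffusers import StableDiffusionXLPipeline

# Illustrative checkpoint; any SDXL-style pipeline with encode_prompt() works the same way.
pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
prompt = "a photo of an astronaut"

# Forward pass with the plain prompt; seed the generator so both runs are comparable.
out_prompt = pipe(
    prompt,
    num_inference_steps=2,
    output_type="np",
    generator=torch.Generator("cuda").manual_seed(0),
).images[0]

# Encode the same prompt manually; SDXL's encode_prompt() returns four tensors.
(
    prompt_embeds,
    negative_prompt_embeds,
    pooled_prompt_embeds,
    negative_pooled_prompt_embeds,
) = pipe.encode_prompt(prompt)

# Forward pass again, passing the precomputed embeddings instead of the prompt.
out_embeds = pipe(
    prompt_embeds=prompt_embeds,
    negative_prompt_embeds=negative_prompt_embeds,
    pooled_prompt_embeds=pooled_prompt_embeds,
    negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
    num_inference_steps=2,
    output_type="np",
    generator=torch.Generator("cuda").manual_seed(0),
).images[0]

# The deleted tests asserted that the two paths agree.
assert np.abs(out_prompt - out_embeds).max() < 1e-4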

tests/pipelines/animatediff/test_animatediff_sdxl.py

Lines changed: 0 additions & 27 deletions
@@ -248,33 +248,6 @@ def test_to_dtype(self):
         model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")]
         self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes))

-    def test_prompt_embeds(self):
-        components = self.get_dummy_components()
-        pipe = self.pipeline_class(**components)
-        pipe.set_progress_bar_config(disable=None)
-        pipe.to(torch_device)
-
-        inputs = self.get_dummy_inputs(torch_device)
-        prompt = inputs.pop("prompt")
-
-        (
-            prompt_embeds,
-            negative_prompt_embeds,
-            pooled_prompt_embeds,
-            negative_pooled_prompt_embeds,
-        ) = pipe.encode_prompt(prompt)
-
-        pipe(
-            **inputs,
-            prompt_embeds=prompt_embeds,
-            negative_prompt_embeds=negative_prompt_embeds,
-            pooled_prompt_embeds=pooled_prompt_embeds,
-            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
-        )
-
-    def test_save_load_optional_components(self):
-        self._test_save_load_optional_components()
-
     @unittest.skipIf(
         torch_device != "cuda" or not is_xformers_available(),
         reason="XFormers attention is only available with CUDA and `xformers` installed",

tests/pipelines/aura_flow/test_pipeline_aura_flow.py

Lines changed: 0 additions & 34 deletions
@@ -5,9 +5,6 @@
 from transformers import AutoTokenizer, UMT5EncoderModel

 from diffusers import AuraFlowPipeline, AuraFlowTransformer2DModel, AutoencoderKL, FlowMatchEulerDiscreteScheduler
-from diffusers.utils.testing_utils import (
-    torch_device,
-)

 from ..test_pipelines_common import (
     PipelineTesterMixin,
@@ -90,37 +87,6 @@ def get_dummy_inputs(self, device, seed=0):
         }
         return inputs

-    def test_aura_flow_prompt_embeds(self):
-        pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
-        inputs = self.get_dummy_inputs(torch_device)
-
-        output_with_prompt = pipe(**inputs).images[0]
-
-        inputs = self.get_dummy_inputs(torch_device)
-        prompt = inputs.pop("prompt")
-
-        do_classifier_free_guidance = inputs["guidance_scale"] > 1
-        (
-            prompt_embeds,
-            prompt_attention_mask,
-            negative_prompt_embeds,
-            negative_prompt_attention_mask,
-        ) = pipe.encode_prompt(
-            prompt,
-            do_classifier_free_guidance=do_classifier_free_guidance,
-            device=torch_device,
-        )
-        output_with_embeds = pipe(
-            prompt_embeds=prompt_embeds,
-            prompt_attention_mask=prompt_attention_mask,
-            negative_prompt_embeds=negative_prompt_embeds,
-            negative_prompt_attention_mask=negative_prompt_attention_mask,
-            **inputs,
-        ).images[0]
-
-        max_diff = np.abs(output_with_prompt - output_with_embeds).max()
-        assert max_diff < 1e-4
-
     def test_attention_slicing_forward_pass(self):
         # Attention slicing needs to implemented differently for this because how single DiT and MMDiT
         # blocks interfere with each other.
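The AuraFlow test deleted above shows a second signature: its encode_prompt() also returns attention masks, which must be passed back alongside the embeddings. A minimal standalone sketch of that variant; the fal/AuraFlow checkpoint name is an illustrative assumption, not part of this commit:

import torch
from diffusers import AuraFlowPipeline

# Illustrative checkpoint, assumed for the sketch.
pipe = AuraFlowPipeline.from_pretrained("fal/AuraFlow", torch_dtype=torch.float16).to("cuda")

# encode_prompt() returns embeddings and attention masks for both CFG branches.
(
    prompt_embeds,
    prompt_attention_mask,
    negative_prompt_embeds,
    negative_prompt_attention_mask,
) = pipe.encode_prompt(
    "a photo of an astronaut",
    do_classifier_free_guidance=True,
    device="cuda",
)

# The masks travel with the embeddings on the second call.
image = pipe(
    prompt_embeds=prompt_embeds,
    prompt_attention_mask=prompt_attention_mask,
    negative_prompt_embeds=negative_prompt_embeds,
    negative_prompt_attention_mask=negative_prompt_attention_mask,
).images[0]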

tests/pipelines/controlnet/test_controlnet_sdxl.py

Lines changed: 0 additions & 39 deletions
@@ -296,45 +296,6 @@ def test_stable_diffusion_xl_multi_prompts(self):
         # ensure the results are not equal
         assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4

-    # Copied from test_stable_diffusion_xl.py
-    def test_stable_diffusion_xl_prompt_embeds(self):
-        components = self.get_dummy_components()
-        sd_pipe = self.pipeline_class(**components)
-        sd_pipe = sd_pipe.to(torch_device)
-        sd_pipe = sd_pipe.to(torch_device)
-        sd_pipe.set_progress_bar_config(disable=None)
-
-        # forward without prompt embeds
-        inputs = self.get_dummy_inputs(torch_device)
-        inputs["prompt"] = 2 * [inputs["prompt"]]
-        inputs["num_images_per_prompt"] = 2
-
-        output = sd_pipe(**inputs)
-        image_slice_1 = output.images[0, -3:, -3:, -1]
-
-        # forward with prompt embeds
-        inputs = self.get_dummy_inputs(torch_device)
-        prompt = 2 * [inputs.pop("prompt")]
-
-        (
-            prompt_embeds,
-            negative_prompt_embeds,
-            pooled_prompt_embeds,
-            negative_pooled_prompt_embeds,
-        ) = sd_pipe.encode_prompt(prompt)
-
-        output = sd_pipe(
-            **inputs,
-            prompt_embeds=prompt_embeds,
-            negative_prompt_embeds=negative_prompt_embeds,
-            pooled_prompt_embeds=pooled_prompt_embeds,
-            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
-        )
-        image_slice_2 = output.images[0, -3:, -3:, -1]
-
-        # make sure that it's equal
-        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
-
     def test_controlnet_sdxl_guess(self):
         device = "cpu"

tests/pipelines/controlnet/test_controlnet_sdxl_img2img.py

Lines changed: 0 additions & 39 deletions
@@ -327,42 +327,3 @@ def test_stable_diffusion_xl_multi_prompts(self):

         # ensure the results are not equal
         assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4
-
-    # Copied from test_stable_diffusion_xl.py
-    def test_stable_diffusion_xl_prompt_embeds(self):
-        components = self.get_dummy_components()
-        sd_pipe = self.pipeline_class(**components)
-        sd_pipe = sd_pipe.to(torch_device)
-        sd_pipe = sd_pipe.to(torch_device)
-        sd_pipe.set_progress_bar_config(disable=None)
-
-        # forward without prompt embeds
-        inputs = self.get_dummy_inputs(torch_device)
-        inputs["prompt"] = 2 * [inputs["prompt"]]
-        inputs["num_images_per_prompt"] = 2
-
-        output = sd_pipe(**inputs)
-        image_slice_1 = output.images[0, -3:, -3:, -1]
-
-        # forward with prompt embeds
-        inputs = self.get_dummy_inputs(torch_device)
-        prompt = 2 * [inputs.pop("prompt")]
-
-        (
-            prompt_embeds,
-            negative_prompt_embeds,
-            pooled_prompt_embeds,
-            negative_pooled_prompt_embeds,
-        ) = sd_pipe.encode_prompt(prompt)
-
-        output = sd_pipe(
-            **inputs,
-            prompt_embeds=prompt_embeds,
-            negative_prompt_embeds=negative_prompt_embeds,
-            pooled_prompt_embeds=pooled_prompt_embeds,
-            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
-        )
-        image_slice_2 = output.images[0, -3:, -3:, -1]
-
-        # make sure that it's equal
-        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

tests/pipelines/controlnet_xs/test_controlnetxs_sdxl.py

Lines changed: 0 additions & 43 deletions
@@ -283,49 +283,6 @@ def test_stable_diffusion_xl_multi_prompts(self):
         # ensure the results are not equal
         assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4

-    # Copied from test_stable_diffusion_xl.py
-    def test_stable_diffusion_xl_prompt_embeds(self):
-        components = self.get_dummy_components()
-        sd_pipe = self.pipeline_class(**components)
-        sd_pipe = sd_pipe.to(torch_device)
-        sd_pipe = sd_pipe.to(torch_device)
-        sd_pipe.set_progress_bar_config(disable=None)
-
-        # forward without prompt embeds
-        inputs = self.get_dummy_inputs(torch_device)
-        inputs["prompt"] = 2 * [inputs["prompt"]]
-        inputs["num_images_per_prompt"] = 2
-
-        output = sd_pipe(**inputs)
-        image_slice_1 = output.images[0, -3:, -3:, -1]
-
-        # forward with prompt embeds
-        inputs = self.get_dummy_inputs(torch_device)
-        prompt = 2 * [inputs.pop("prompt")]
-
-        (
-            prompt_embeds,
-            negative_prompt_embeds,
-            pooled_prompt_embeds,
-            negative_pooled_prompt_embeds,
-        ) = sd_pipe.encode_prompt(prompt)
-
-        output = sd_pipe(
-            **inputs,
-            prompt_embeds=prompt_embeds,
-            negative_prompt_embeds=negative_prompt_embeds,
-            pooled_prompt_embeds=pooled_prompt_embeds,
-            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
-        )
-        image_slice_2 = output.images[0, -3:, -3:, -1]
-
-        # make sure that it's equal
-        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1.1e-4
-
-    # Copied from test_stable_diffusion_xl.py
-    def test_save_load_optional_components(self):
-        self._test_save_load_optional_components()
-
     # Copied from test_controlnetxs.py
     def test_to_dtype(self):
         components = self.get_dummy_components()
tests/pipelines/stable_diffusion/test_stable_diffusion.py

Lines changed: 0 additions & 78 deletions
@@ -375,84 +375,6 @@ def test_stable_diffusion_negative_prompt_embeds(self):

         assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

-    def test_stable_diffusion_prompt_embeds_no_text_encoder_or_tokenizer(self):
-        components = self.get_dummy_components()
-        sd_pipe = StableDiffusionPipeline(**components)
-        sd_pipe = sd_pipe.to(torch_device)
-        sd_pipe.set_progress_bar_config(disable=None)
-
-        inputs = self.get_dummy_inputs(torch_device)
-        inputs["negative_prompt"] = "this is a negative prompt"
-
-        # forward
-        output = sd_pipe(**inputs)
-        image_slice_1 = output.images[0, -3:, -3:, -1]
-
-        inputs = self.get_dummy_inputs(torch_device)
-        prompt = inputs.pop("prompt")
-        negative_prompt = "this is a negative prompt"
-
-        prompt_embeds, negative_prompt_embeds = sd_pipe.encode_prompt(
-            prompt,
-            torch_device,
-            1,
-            True,
-            negative_prompt=negative_prompt,
-            prompt_embeds=None,
-            negative_prompt_embeds=None,
-        )
-
-        inputs["prompt_embeds"] = prompt_embeds
-        inputs["negative_prompt_embeds"] = negative_prompt_embeds
-
-        sd_pipe.text_encoder = None
-        sd_pipe.tokenizer = None
-
-        # forward
-        output = sd_pipe(**inputs)
-        image_slice_2 = output.images[0, -3:, -3:, -1]
-
-        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
-
-    def test_stable_diffusion_prompt_embeds_with_plain_negative_prompt_list(self):
-        components = self.get_dummy_components()
-        sd_pipe = StableDiffusionPipeline(**components)
-        sd_pipe = sd_pipe.to(torch_device)
-        sd_pipe = sd_pipe.to(torch_device)
-        sd_pipe.set_progress_bar_config(disable=None)
-
-        inputs = self.get_dummy_inputs(torch_device)
-        negative_prompt = 3 * ["this is a negative prompt"]
-        inputs["negative_prompt"] = negative_prompt
-        inputs["prompt"] = 3 * [inputs["prompt"]]
-
-        # forward
-        output = sd_pipe(**inputs)
-        image_slice_1 = output.images[0, -3:, -3:, -1]
-
-        inputs = self.get_dummy_inputs(torch_device)
-        inputs["negative_prompt"] = negative_prompt
-        prompt = 3 * [inputs.pop("prompt")]
-
-        text_inputs = sd_pipe.tokenizer(
-            prompt,
-            padding="max_length",
-            max_length=sd_pipe.tokenizer.model_max_length,
-            truncation=True,
-            return_tensors="pt",
-        )
-        text_inputs = text_inputs["input_ids"].to(torch_device)
-
-        prompt_embeds = sd_pipe.text_encoder(text_inputs)[0]
-
-        inputs["prompt_embeds"] = prompt_embeds
-
-        # forward
-        output = sd_pipe(**inputs)
-        image_slice_2 = output.images[0, -3:, -3:, -1]
-
-        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
-
     def test_stable_diffusion_ddim_factor_8(self):
         device = "cpu"  # ensure determinism for the device-dependent torch.Generator
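The two Stable Diffusion tests deleted above cover a third shape of the API: SD 1.x's encode_prompt() takes device, num_images_per_prompt, and do_classifier_free_guidance positionally and returns only the positive and negative embeddings, and once those are precomputed the text encoder and tokenizer can be dropped entirely. A minimal sketch; the checkpoint name and prompts are illustrative assumptions, not part of this commit:

import torch
from diffusers import StableDiffusionPipeline

# Illustrative checkpoint, assumed for the sketch.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Positional form used by the deleted test: (prompt, device, num_images_per_prompt, do_cfg).
prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(
    "a photo of an astronaut",
    "cuda",
    1,
    True,
    negative_prompt="this is a negative prompt",
)

# With embeddings in hand, the text stack is no longer needed,
# as test_stable_diffusion_prompt_embeds_no_text_encoder_or_tokenizer demonstrated.
pipe.text_encoder = None
pipe.tokenizer = None

image = pipe(
    prompt_embeds=prompt_embeds,
    negative_prompt_embeds=negative_prompt_embeds,
).images[0]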

tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3.py

Lines changed: 0 additions & 33 deletions
@@ -156,39 +156,6 @@ def test_stable_diffusion_3_different_negative_prompts(self):
         # Outputs should be different here
         assert max_diff > 1e-2

-    def test_stable_diffusion_3_prompt_embeds(self):
-        pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
-        inputs = self.get_dummy_inputs(torch_device)
-
-        output_with_prompt = pipe(**inputs).images[0]
-
-        inputs = self.get_dummy_inputs(torch_device)
-        prompt = inputs.pop("prompt")
-
-        do_classifier_free_guidance = inputs["guidance_scale"] > 1
-        (
-            prompt_embeds,
-            negative_prompt_embeds,
-            pooled_prompt_embeds,
-            negative_pooled_prompt_embeds,
-        ) = pipe.encode_prompt(
-            prompt,
-            prompt_2=None,
-            prompt_3=None,
-            do_classifier_free_guidance=do_classifier_free_guidance,
-            device=torch_device,
-        )
-        output_with_embeds = pipe(
-            prompt_embeds=prompt_embeds,
-            negative_prompt_embeds=negative_prompt_embeds,
-            pooled_prompt_embeds=pooled_prompt_embeds,
-            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
-            **inputs,
-        ).images[0]
-
-        max_diff = np.abs(output_with_prompt - output_with_embeds).max()
-        assert max_diff < 1e-4
-
     def test_fused_qkv_projections(self):
         device = "cpu"  # ensure determinism for the device-dependent torch.Generator
         components = self.get_dummy_components()

tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3_img2img.py

Lines changed: 1 addition & 33 deletions
@@ -159,39 +159,7 @@ def test_stable_diffusion_3_img2img_different_negative_prompts(self):
         # Outputs should be different here
         assert max_diff > 1e-2

-    def test_stable_diffusion_3_img2img_prompt_embeds(self):
-        pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
-        inputs = self.get_dummy_inputs(torch_device)
-
-        output_with_prompt = pipe(**inputs).images[0]
-
-        inputs = self.get_dummy_inputs(torch_device)
-        prompt = inputs.pop("prompt")
-
-        do_classifier_free_guidance = inputs["guidance_scale"] > 1
-        (
-            prompt_embeds,
-            negative_prompt_embeds,
-            pooled_prompt_embeds,
-            negative_pooled_prompt_embeds,
-        ) = pipe.encode_prompt(
-            prompt,
-            prompt_2=None,
-            prompt_3=None,
-            do_classifier_free_guidance=do_classifier_free_guidance,
-            device=torch_device,
-        )
-        output_with_embeds = pipe(
-            prompt_embeds=prompt_embeds,
-            negative_prompt_embeds=negative_prompt_embeds,
-            pooled_prompt_embeds=pooled_prompt_embeds,
-            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
-            **inputs,
-        ).images[0]
-
-        max_diff = np.abs(output_with_prompt - output_with_embeds).max()
-        assert max_diff < 1e-4
-
+    @unittest.skip("Skip for now.")
     def test_multi_vae(self):
         pass
