Skip to content

Commit 8200b27

Browse files
committed
updates
1 parent 4e39d21 commit 8200b27

File tree

11 files changed

+48
-7
lines changed

11 files changed

+48
-7
lines changed

src/diffusers/pipelines/pag/pipeline_pag_sana.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -268,7 +268,8 @@ def encode_prompt(
268268
else:
269269
batch_size = prompt_embeds.shape[0]
270270

271-
self.tokenizer.padding_side = "right"
271+
if getattr(self, "tokenizer", None) is not None:
272+
self.tokenizer.padding_side = "right"
272273

273274
# See Section 3.1. of the paper.
274275
max_length = max_sequence_length

src/diffusers/pipelines/sana/pipeline_sana.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -312,7 +312,8 @@ def encode_prompt(
312312
else:
313313
batch_size = prompt_embeds.shape[0]
314314

315-
self.tokenizer.padding_side = "right"
315+
if getattr(self, "tokenizer", None) is not None:
316+
self.tokenizer.padding_side = "right"
316317

317318
# See Section 3.1. of the paper.
318319
max_length = max_sequence_length

tests/pipelines/controlnet_hunyuandit/test_controlnet_hunyuandit.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -178,6 +178,12 @@ def test_save_load_optional_components(self):
178178
# TODO(YiYi) need to fix later
179179
pass
180180

181+
@unittest.skip(
182+
"Test not supported as `encode_prompt` is called two times separately which deviates from about 99% of the pipelines we have."
183+
)
184+
def test_encode_prompt_works_in_isolation(self):
185+
pass
186+
181187

182188
@slow
183189
@require_torch_accelerator

tests/pipelines/hunyuan_dit/test_hunyuan_dit.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -298,6 +298,12 @@ def test_fused_qkv_projections(self):
298298
original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2
299299
), "Original outputs should match when fused QKV projections are disabled."
300300

301+
@unittest.skip(
302+
"Test not supported as `encode_prompt` is called two times separately which deviates from about 99% of the pipelines we have."
303+
)
304+
def test_encode_prompt_works_in_isolation(self):
305+
pass
306+
301307

302308
@slow
303309
@require_torch_accelerator

tests/pipelines/kolors/test_kolors_img2img.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -152,3 +152,7 @@ def test_inference_batch_single_identical(self):
152152

153153
def test_float16_inference(self):
154154
super().test_float16_inference(expected_max_diff=7e-2)
155+
156+
@unittest.skip("Test not supported because kolors img2img doesn't take pooled embeds as inputs unlike kolors t2i.")
157+
def test_encode_prompt_works_in_isolation(self):
158+
pass

tests/pipelines/latte/test_latte.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -279,6 +279,10 @@ def test_save_load_optional_components(self):
279279
def test_xformers_attention_forwardGenerator_pass(self):
280280
super()._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
281281

282+
@unittest.skip("Test not supported because `encode_prompt()` has multiple returns.")
283+
def test_encode_prompt_works_in_isolation(self):
284+
pass
285+
282286

283287
@slow
284288
@require_torch_gpu

tests/pipelines/pag/test_pag_hunyuan_dit.py

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -357,9 +357,8 @@ def test_pag_applied_layers(self):
357357
pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False)
358358
assert len(pipe.pag_attn_processors) == 2
359359

360+
@unittest.skip(
361+
"Test not supported as `encode_prompt` is called two times separately which deviates from about 99% of the pipelines we have."
362+
)
360363
def test_encode_prompt_works_in_isolation(self):
361-
extra_required_param_value_dict = {
362-
"device": torch.device(torch_device).type,
363-
"do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0,
364-
}
365-
return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict)
364+
pass

tests/pipelines/stable_audio/test_stable_audio.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -413,6 +413,10 @@ def test_sequential_cpu_offload_forward_pass(self):
413413
def test_sequential_offload_forward_pass_twice(self):
414414
pass
415415

416+
@unittest.skip("Test not supported because `rotary_embed_dim` doesn't have any sensible default.")
417+
def test_encode_prompt_works_in_isolation(self):
418+
pass
419+
416420

417421
@nightly
418422
@require_torch_gpu

tests/pipelines/stable_cascade/test_stable_cascade_decoder.py

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -307,6 +307,14 @@ def test_stable_cascade_decoder_single_prompt_multiple_image_embeddings_with_gui
307307
batch_size * prior_num_images_per_prompt * decoder_num_images_per_prompt
308308
)
309309

310+
def test_encode_prompt_works_in_isolation(self):
311+
extra_required_param_value_dict = {
312+
"device": torch.device(torch_device).type,
313+
"batch_size": 1,
314+
"do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0,
315+
}
316+
return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict)
317+
310318

311319
@slow
312320
@require_torch_gpu

tests/pipelines/stable_cascade/test_stable_cascade_prior.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -275,6 +275,10 @@ def test_stable_cascade_decoder_prompt_embeds(self):
275275

276276
assert np.abs(output_prompt.image_embeddings - output_prompt_embeds.image_embeddings).max() < 1e-5
277277

278+
@unittest.skip("Test not supported because dtype determination relies on text encoder.")
279+
def test_encode_prompt_works_in_isolation(self):
280+
pass
281+
278282

279283
@slow
280284
@require_torch_gpu

0 commit comments

Comments
 (0)