Skip to content

Commit 8222a55

Browse files
committed
apply suggestions from review
1 parent 0c1358c commit 8222a55

File tree

2 files changed

+1
-11
lines changed

2 files changed

+1
-11
lines changed

src/diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -362,16 +362,9 @@ def prepare_latents(
362362
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
363363
)
364364

365-
assert image.ndim == 4
366365
image = image.unsqueeze(2) # [B, C, F, H, W]
367366

368367
if isinstance(generator, list):
369-
if len(generator) != batch_size:
370-
raise ValueError(
371-
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
372-
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
373-
)
374-
375368
image_latents = [
376369
retrieve_latents(self.vae.encode(image[i].unsqueeze(0)), generator[i]) for i in range(batch_size)
377370
]

tests/pipelines/cogvideo/test_cogvideox_image2video.py

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -54,6 +54,7 @@ class CogVideoXPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
5454
"callback_on_step_end_tensor_inputs",
5555
]
5656
)
57+
test_xformers_attention = False
5758

5859
def get_dummy_components(self):
5960
torch.manual_seed(0)
@@ -283,10 +284,6 @@ def test_vae_tiling(self, expected_diff_max: float = 0.3):
283284
"VAE tiling should not affect the inference results",
284285
)
285286

286-
@unittest.skip("xformers attention processor does not exist for CogVideoX")
287-
def test_xformers_attention_forwardGenerator_pass(self):
288-
pass
289-
290287
def test_fused_qkv_projections(self):
291288
device = "cpu" # ensure determinism for the device-dependent torch.Generator
292289
components = self.get_dummy_components()

0 commit comments

Comments (0)