Commit bc07f9f

remove debug prints
1 parent 33c7cd6 commit bc07f9f

File tree

1 file changed: +0 −3 lines changed


src/diffusers/pipelines/cogvideo/pipeline_cogvideox_image2video.py

Lines changed: 0 additions & 3 deletions
@@ -366,7 +366,6 @@ def prepare_latents(
 
         assert image.ndim == 4
         image = image.unsqueeze(2)  # [B, C, F, H, W]
-        print(image.shape)
 
         if isinstance(generator, list):
             if len(generator) != batch_size:
@@ -392,7 +391,6 @@ def prepare_latents(
             width // self.vae_scale_factor_spatial,
         )
         latent_padding = torch.zeros(padding_shape, device=device, dtype=dtype)
-        print(image_latents.shape, latent_padding.shape)
         image_latents = torch.cat([image_latents, latent_padding], dim=1)
 
         if latents is None:
@@ -758,7 +756,6 @@ def __call__(
 
                 latent_image_input = torch.cat([image_latents] * 2) if do_classifier_free_guidance else image_latents
                 latent_model_input = torch.cat([latent_model_input, latent_image_input], dim=2)
-                print(latent_model_input.shape)
 
                 # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
                 timestep = t.expand(latent_model_input.shape[0])
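
For reference, a minimal sketch (not part of the commit) of the tensor shapes the removed print calls were inspecting; the sizes below are illustrative assumptions, not values taken from the pipeline.

import torch

# Illustrative sizes only (assumptions, not pipeline defaults).
batch_size, channels, height, width = 1, 16, 60, 90
num_frames = 13  # number of latent frames

# prepare_latents: the encoded image covers a single latent frame, so it is
# zero-padded along the frame dimension (dim=1) to match the video latents,
# mirroring the torch.cat in the diff above.
image_latents = torch.randn(batch_size, 1, channels, height, width)
padding_shape = (batch_size, num_frames - 1, channels, height, width)
latent_padding = torch.zeros(padding_shape)
image_latents = torch.cat([image_latents, latent_padding], dim=1)
print(image_latents.shape)  # torch.Size([1, 13, 16, 60, 90])

# __call__: the padded image latents are concatenated with the noisy video
# latents along the channel dimension (dim=2) before the denoising step.
latents = torch.randn(batch_size, num_frames, channels, height, width)
latent_model_input = torch.cat([latents, image_latents], dim=2)
print(latent_model_input.shape)  # torch.Size([1, 13, 32, 60, 90])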
