1 parent 6508da6 commit 6ee6b53
src/diffusers/pipelines/wan/pipeline_wan_video2video.py
@@ -419,12 +419,7 @@ def prepare_latents(
         )
 
         if latents is None:
-            if isinstance(generator, list):
-                init_latents = [
-                    retrieve_latents(self.vae.encode(video[i].unsqueeze(0)), generator[i]) for i in range(batch_size)
-                ]
-            else:
-                init_latents = [retrieve_latents(self.vae.encode(vid.unsqueeze(0)), generator) for vid in video]
+            init_latents = [retrieve_latents(self.vae.encode(vid.unsqueeze(0)), sample_mode="argmax") for vid in video]
 
             init_latents = torch.cat(init_latents, dim=0).to(dtype)
 
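The change collapses the generator-dependent branches into a single list comprehension and encodes the conditioning video deterministically via `sample_mode="argmax"`. For reference, below is a minimal sketch of how the `retrieve_latents` helper is typically defined in diffusers pipelines (an assumption about the helper's body, not part of this commit): with `sample_mode="argmax"` the mode of the VAE posterior is returned and the `generator` argument is never consulted, which is why the per-generator handling can be dropped.

```python
# Sketch (assumption): typical retrieve_latents helper found in diffusers pipelines.
from typing import Optional

import torch


def retrieve_latents(
    encoder_output, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
):
    if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
        # Stochastic path: draw from the VAE posterior using the supplied generator.
        return encoder_output.latent_dist.sample(generator)
    elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
        # Deterministic path: return the posterior mode; the generator is ignored.
        return encoder_output.latent_dist.mode()
    elif hasattr(encoder_output, "latents"):
        return encoder_output.latents
    else:
        raise AttributeError("Could not access latents of provided encoder_output")
```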