@@ -52,23 +52,26 @@
         >>> from diffusers.schedulers.scheduling_unipc_multistep import UniPCMultistepScheduler

         >>> # Available models: Wan-AI/Wan2.1-T2V-14B-Diffusers, Wan-AI/Wan2.1-T2V-1.3B-Diffusers
-        >>> model_id = "Wan-AI/Wan2.1-T2V-14B-Diffusers"
+        >>> model_id = "Wan-AI/Wan2.1-T2V-1.3B-Diffusers"
         >>> vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32)
         >>> pipe = WanVideoToVideoPipeline.from_pretrained(model_id, vae=vae, torch_dtype=torch.bfloat16)
-        >>> flow_shift = 5.0  # 5.0 for 720P, 3.0 for 480P
+        >>> flow_shift = 3.0  # 5.0 for 720P, 3.0 for 480P
         >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=flow_shift)
         >>> pipe.to("cuda")

         >>> prompt = "A cat and a dog baking a cake together in a kitchen. The cat is carefully measuring flour, while the dog is stirring the batter with a wooden spoon. The kitchen is cozy, with sunlight streaming through the window."
         >>> negative_prompt = "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards"
-
+        >>> video = load_video(
+        ...     "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/hiker.mp4"
+        ... )
         >>> output = pipe(
+        ...     video=video,
         ...     prompt=prompt,
         ...     negative_prompt=negative_prompt,
-        ...     height=720,
-        ...     width=1280,
-        ...     num_frames=81,
+        ...     height=480,
+        ...     width=720,
         ...     guidance_scale=5.0,
+        ...     strength=0.7,
         ... ).frames[0]
         >>> export_to_video(output, "output.mp4", fps=16)
         ```
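Since the example is now video-to-video, `num_frames` is gone (the frame count comes from the input clip) and `strength` becomes the main knob. A minimal sketch of the usual diffusers img2img/vid2vid convention, assuming this pipeline follows it (the exact helper may differ):

```python
# Assumed convention: `strength` decides how many of the scheduled denoising
# steps actually run on the noised input-video latents.
num_inference_steps = 50
strength = 0.7

init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)

# With strength=0.7, the first 15 steps are skipped and 35 steps run,
# so roughly 30% of the input video's structure survives.
print(t_start, num_inference_steps - t_start)  # 15 35
```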
@@ -341,6 +344,8 @@ def check_inputs(
         negative_prompt,
         height,
         width,
+        video=None,
+        latents=None,
         prompt_embeds=None,
         negative_prompt_embeds=None,
         callback_on_step_end_tensor_inputs=None,
@@ -376,6 +381,9 @@ def check_inputs(
         ):
             raise ValueError(f"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}")

+        if video is not None and latents is not None:
+            raise ValueError("Only one of `video` or `latents` should be provided")
+
     def prepare_latents(
         self,
         video: Optional[torch.Tensor] = None,
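The new guard makes `video` and `latents` mutually exclusive: passing both is ambiguous (encode the clip, or resume from the given latents?), so `check_inputs` now rejects it up front. A hypothetical call, continuing from the docstring example above; the tensor shape is arbitrary because validation raises before the latents are ever read:

```python
import torch

try:
    pipe(prompt=prompt, video=video, latents=torch.randn(1, 16, 21, 60, 90))
except ValueError as err:
    print(err)  # Only one of `video` or `latents` should be provided
```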
@@ -569,6 +577,8 @@ def __call__(
             negative_prompt,
             height,
             width,
+            video,
+            latents,
             prompt_embeds,
             negative_prompt_embeds,
             callback_on_step_end_tensor_inputs,
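With `video` and `latents` threaded into `check_inputs`, the conflict is caught during validation, before any VAE encoding starts. A hedged sketch of the two call paths this leaves; `cached_latents` is a hypothetical name, and the latents-only path assumes `prepare_latents` accepts `video=None`, as its `Optional` annotation suggests:

```python
# Path 1: start from pixels; the pipeline encodes the clip itself.
frames = pipe(prompt=prompt, video=video, strength=0.7).frames[0]

# Path 2: reuse latents prepared earlier, leaving `video` unset.
frames = pipe(prompt=prompt, latents=cached_latents).frames[0]
```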