import torch
from PIL import Image
import librosa
from diffsynth import VideoData, save_video_with_audio
from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
from modelscope import dataset_snapshot_download

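# Assemble the Wan2.2-S2V pipeline: the S2V DiT weights, the wav2vec2 audio encoder,
# the UMT5 text encoder, and the Wan2.1 VAE are all loaded from the Wan-AI/Wan2.2-S2V-14B checkpoint.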
pipe = WanVideoPipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cuda",
    model_configs=[
        ModelConfig(model_id="Wan-AI/Wan2.2-S2V-14B", origin_file_pattern="diffusion_pytorch_model*.safetensors"),
        ModelConfig(model_id="Wan-AI/Wan2.2-S2V-14B", origin_file_pattern="wav2vec2-large-xlsr-53-english/model.safetensors"),
        ModelConfig(model_id="Wan-AI/Wan2.2-S2V-14B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth"),
        ModelConfig(model_id="Wan-AI/Wan2.2-S2V-14B", origin_file_pattern="Wan2.1_VAE.pth"),
    ],
    audio_processor_config=ModelConfig(model_id="Wan-AI/Wan2.2-S2V-14B", origin_file_pattern="wav2vec2-large-xlsr-53-english/"),
)
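# Download the example assets (reference image, singing audio, and pose video) used below.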
dataset_snapshot_download(
    dataset_id="DiffSynth-Studio/example_video_dataset",
    local_dir="./data/example_video_dataset",
    allow_file_pattern="wans2v/*",
)

num_frames = 81  # must be of the form 4n + 1
height = 448
width = 832

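# Prompts. The negative prompt is the standard Chinese Wan negative prompt (roughly: blurry image,
# worst quality, unclear details, violent agitation, rapidly trembling hands, subtitles, ugly,
# mutilated, extra fingers, poorly drawn hands and face, deformed, disfigured, malformed limbs,
# fused fingers, static motionless frame, cluttered background, three legs, crowded background,
# walking backwards).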
prompt = "a person is singing"
negative_prompt = "画面模糊,最差质量,画面模糊,细节模糊不清,情绪激动剧烈,手快速抖动,字幕,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走"
input_image = Image.open("data/example_video_dataset/wans2v/pose.png").convert("RGB").resize((width, height))
# S2V audio input; a 16 kHz sampling rate is recommended
audio_path = "data/example_video_dataset/wans2v/sing.MP3"
input_audio, sample_rate = librosa.load(audio_path, sr=16000)

# Speech-to-video
video = pipe(
    prompt=prompt,
    input_image=input_image,
    negative_prompt=negative_prompt,
    seed=0,
    num_frames=num_frames,
    height=height,
    width=width,
    audio_sample_rate=sample_rate,
    input_audio=input_audio,
    num_inference_steps=40,
)
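# Mux the generated frames (excluding the first) with the source audio track at 16 fps.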
save_video_with_audio(video[1:], "video_with_audio.mp4", audio_path, fps=16, quality=5)

# For pose guidance, S2V uses the first num_frames frames of the pose video as reference.
# The pose video's height and width must match input_image, and its fps should be 16, the same as the output video.
pose_video_path = "data/example_video_dataset/wans2v/pose.mp4"
pose_video = VideoData(pose_video_path, height=height, width=width)

# Speech-to-video with pose
video = pipe(
    prompt=prompt,
    input_image=input_image,
    negative_prompt=negative_prompt,
    seed=0,
    num_frames=num_frames,
    height=height,
    width=width,
    audio_sample_rate=sample_rate,
    input_audio=input_audio,
    s2v_pose_video=pose_video,
    num_inference_steps=40,
)
save_video_with_audio(video[1:], "video_pose_with_audio.mp4", audio_path, fps=16, quality=5)
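
# A minimal sketch, not part of the original example (assumes librosa >= 0.10 for the path= keyword):
# if you want the 16 fps output to roughly cover the whole audio clip instead of the fixed 81 frames
# above, num_frames could be derived from the clip duration and rounded down to the nearest 4n + 1.
# audio_duration = librosa.get_duration(path=audio_path)
# num_frames = int(audio_duration * 16) // 4 * 4 + 1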