import torch
from PIL import Image
import librosa
-from diffsynth import save_video, VideoData, save_video_with_audio
+from diffsynth import VideoData, save_video_with_audio
from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download

pipe = WanVideoPipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    # ...
    ],
    audio_processor_config=ModelConfig(model_id="Wan-AI/Wan2.2-S2V-14B", origin_file_pattern="wav2vec2-large-xlsr-53-english/"),
)
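+# download the example image, audio, and pose video (wans2v/*) used below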
+dataset_snapshot_download(
+    dataset_id="DiffSynth-Studio/example_video_dataset",
+    local_dir="./data/example_video_dataset",
+    allow_file_pattern="wans2v/*"
+)
+
+num_frames = 81  # must be of the form 4n+1
+height = 448
+width = 832

prompt = "a person is singing"
-input_image = Image.open("/mnt/nas1/zhanghong/project/aigc/Wan2.2_s2v/examples/pose.png").convert("RGB").resize((width, height))
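+# Chinese negative prompt commonly used with Wan models (roughly: blurry, worst quality, blurred details, violent motion, trembling hands, subtitles, ugly, mutilated, extra fingers, poorly drawn hands and face, deformed or fused limbs, static frame, cluttered or crowded background, three legs, walking backwards)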
+negative_prompt = "画面模糊,最差质量,画面模糊,细节模糊不清,情绪激动剧烈,手快速抖动,字幕,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走"
+input_image = Image.open("data/example_video_dataset/wans2v/pose.png").convert("RGB").resize((width, height))
# S2V audio input; a 16 kHz sampling rate is recommended
-audio_path = '/mnt/nas1/zhanghong/project/aigc/Wan2.2_s2v/examples/sing.MP3'
+audio_path = 'data/example_video_dataset/wans2v/sing.MP3'
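+# librosa resamples the clip to 16 kHz and returns a mono waveform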
input_audio, sample_rate = librosa.load(audio_path, sr=16000)

# Speech-to-video
video = pipe(
    prompt=prompt,
    input_image=input_image,
-    negative_prompt="",
+    negative_prompt=negative_prompt,
+    seed=0,
+    num_frames=num_frames,
+    height=height,
+    width=width,
+    audio_sample_rate=sample_rate,
+    input_audio=input_audio,
+    num_inference_steps=40,
+)
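+# the first output frame is dropped before muxing the audio (assumption: it corresponds to the reference image)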
+save_video_with_audio(video[1:], "video_with_audio.mp4", audio_path, fps=16, quality=5)
+
+# S2V uses the first num_frames frames of the pose video as the pose reference; its height and width must match input_image, and its fps should be 16 to match the output video.
+pose_video_path = 'data/example_video_dataset/wans2v/pose.mp4'
+pose_video = VideoData(pose_video_path, height=height, width=width)
+
+# Speech-to-video with pose
+video = pipe(
+    prompt=prompt,
+    input_image=input_image,
+    negative_prompt=negative_prompt,
    seed=0,
-    num_frames=81,
-    height=1280,
-    width=720,
+    num_frames=num_frames,
+    height=height,
+    width=width,
    audio_sample_rate=sample_rate,
    input_audio=input_audio,
+    s2v_pose_video=pose_video,
    num_inference_steps=40,
)
-save_video_with_audio(video, "video_with_audio.mp4", audio_path, fps=16, quality=5)
+save_video_with_audio(video[1:], "video_pose_with_audio.mp4", audio_path, fps=16, quality=5)