
Commit a0bae07

add wans2v example
1 parent ff71720

1 file changed: 39 additions, 8 deletions
@@ -1,8 +1,9 @@
 import torch
 from PIL import Image
 import librosa
-from diffsynth import save_video, VideoData, save_video_with_audio
+from diffsynth import VideoData, save_video_with_audio
 from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download

 pipe = WanVideoPipeline.from_pretrained(
     torch_dtype=torch.bfloat16,
@@ -15,24 +16,54 @@
     ],
     audio_processor_config=ModelConfig(model_id="Wan-AI/Wan2.2-S2V-14B", origin_file_pattern="wav2vec2-large-xlsr-53-english/"),
 )
+dataset_snapshot_download(
+    dataset_id="DiffSynth-Studio/example_video_dataset",
+    local_dir="./data/example_video_dataset",
+    allow_file_pattern="wans2v/*"
+)
+
+num_frames = 81  # must have the form 4n+1
+height = 448
+width = 832

 prompt = "a person is singing"
-input_image = Image.open("/mnt/nas1/zhanghong/project/aigc/Wan2.2_s2v/examples/pose.png").convert("RGB").resize((width, height))
+negative_prompt = "画面模糊,最差质量,画面模糊,细节模糊不清,情绪激动剧烈,手快速抖动,字幕,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走"
+input_image = Image.open("data/example_video_dataset/wans2v/pose.png").convert("RGB").resize((width, height))
 # S2V audio input; a 16 kHz sampling rate is recommended
-audio_path = '/mnt/nas1/zhanghong/project/aigc/Wan2.2_s2v/examples/sing.MP3'
+audio_path = 'data/example_video_dataset/wans2v/sing.MP3'
 input_audio, sample_rate = librosa.load(audio_path, sr=16000)

 # Speech-to-video
 video = pipe(
     prompt=prompt,
     input_image=input_image,
-    negative_prompt="",
+    negative_prompt=negative_prompt,
+    seed=0,
+    num_frames=num_frames,
+    height=height,
+    width=width,
+    audio_sample_rate=sample_rate,
+    input_audio=input_audio,
+    num_inference_steps=40,
+)
+save_video_with_audio(video[1:], "video_with_audio.mp4", audio_path, fps=16, quality=5)
+
+# S2V uses the first num_frames frames of the pose video as reference; its height and width must match input_image, and its fps should be 16, the same as the output video fps.
+pose_video_path = 'data/example_video_dataset/wans2v/pose.mp4'
+pose_video = VideoData(pose_video_path, height=height, width=width)
+
+# Speech-to-video with pose
+video = pipe(
+    prompt=prompt,
+    input_image=input_image,
+    negative_prompt=negative_prompt,
     seed=0,
-    num_frames=81,
-    height=1280,
-    width=720,
+    num_frames=num_frames,
+    height=height,
+    width=width,
     audio_sample_rate=sample_rate,
     input_audio=input_audio,
+    s2v_pose_video=pose_video,
     num_inference_steps=40,
 )
-save_video_with_audio(video, "video_with_audio.mp4", audio_path, fps=16, quality=5)
+save_video_with_audio(video[1:], "video_pose_with_audio.mp4", audio_path, fps=16, quality=5)

(For reference, the Chinese negative_prompt reads roughly: "blurry image, worst quality, blurry image, unclear details, intense emotional agitation, rapidly shaking hands, subtitles, ugly, mutilated, extra fingers, poorly drawn hands, poorly drawn face, deformed, disfigured, malformed limbs, fused fingers, a static motionless frame, cluttered background, three legs, many people in the background, walking backwards.")
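A side note on the new constants: the diff pins num_frames = 81 because the frame count must have the form 4n+1, saves video at fps=16, and loads audio at 16 kHz. A minimal sketch of the resulting arithmetic, with a hypothetical snap_to_4n_plus_1 helper that is not part of DiffSynth:

def snap_to_4n_plus_1(frames: int) -> int:
    # Round an arbitrary target to the nearest valid count of the form 4n + 1.
    n = max(round((frames - 1) / 4), 0)
    return 4 * n + 1

fps = 16             # output fps passed to save_video_with_audio above
sample_rate = 16000  # recommended S2V audio sampling rate

num_frames = snap_to_4n_plus_1(80)                    # -> 81, as in the example
duration_s = num_frames / fps                         # 5.0625 s of video
audio_samples_needed = int(duration_s * sample_rate)  # 81000 samples at 16 kHz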
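The new pose-video comment requires the pose clip to match input_image's height and width and to run at 16 fps. If a source clip does not, one possible way to conform it is an ffmpeg re-encode driven from Python; this is only a sketch under that assumption (ffmpeg on PATH, placeholder file names):

import subprocess

def conform_pose_video(src: str, dst: str, width: int = 832, height: int = 448, fps: int = 16) -> None:
    # Force the frame rate and resolution the pipeline expects, and drop the
    # audio track (the driving audio is supplied separately via input_audio).
    subprocess.run(
        ["ffmpeg", "-y", "-i", src, "-vf", f"fps={fps},scale={width}:{height}", "-an", dst],
        check=True,
    )

conform_pose_video("raw_pose.mp4", "data/example_video_dataset/wans2v/pose_16fps.mp4")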
