forked from Eco-Sphere/cache-dit
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathrun_wan_2.2_i2v.py
More file actions
170 lines (144 loc) · 5.37 KB
/
run_wan_2.2_i2v.py
File metadata and controls
170 lines (144 loc) · 5.37 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
import os
import sys
sys.path.append("..")
import time
import torch
import diffusers
from diffusers import (
AutoencoderKLWan,
WanTransformer3DModel,
WanImageToVideoPipeline,
)
from diffusers.utils import export_to_video, load_image
from utils import get_args, GiB, strify, cachify
import cache_dit
import numpy as np
# Parse CLI options (cache / quantize / compile flags etc.; see utils.get_args).
args = get_args()
print(args)

# Allow overriding the checkpoint location via env var; fall back to the
# Hub repo id.
model_id = os.environ.get(
    "WAN_2_2_I2V_DIR",
    "Wan-AI/Wan2.2-I2V-A14B-Diffusers",
)

pipe: WanImageToVideoPipeline = WanImageToVideoPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    # Based on: https://github.com/huggingface/diffusers/pull/12523
    # Shard the model across GPUs only on low-memory multi-GPU hosts;
    # otherwise load normally (device_map=None).
    # NOTE(review): assumes GiB() reports available GPU memory in GiB --
    # confirm against utils.
    device_map=(
        "balanced" if GiB() < 96 and torch.cuda.device_count() > 1 else None
    ),
)

if GiB() < 96 and torch.cuda.device_count() <= 1:
    # issue: https://github.com/huggingface/diffusers/issues/12499
    # Single low-memory GPU: stream submodules to the GPU on demand instead.
    print("Enable model cpu offload for low memory device.")
    pipe.enable_model_cpu_offload()
if args.cache:
    from cache_dit import (
        ForwardPattern,
        BlockAdapter,
        ParamsModifier,
        DBCacheConfig,
    )

    # Wan 2.2 runs two expert transformers (high-noise first, then
    # low-noise); register both with cache-dit and give each its own
    # cache schedule via ParamsModifier (order matches `transformer=`).
    cachify(
        args,
        BlockAdapter(
            pipe=pipe,
            transformer=[
                pipe.transformer,
                pipe.transformer_2,
            ],
            blocks=[
                pipe.transformer.blocks,
                pipe.transformer_2.blocks,
            ],
            forward_pattern=[
                ForwardPattern.Pattern_2,
                ForwardPattern.Pattern_2,
            ],
            params_modifiers=[
                # The high-noise transformer only runs ~30% of the steps,
                # so it gets a longer warmup and fewer cached steps.
                ParamsModifier(
                    cache_config=DBCacheConfig().reset(
                        max_warmup_steps=4,
                        max_cached_steps=8,
                    ),
                ),
                # Low-noise transformer: shorter warmup, more cached steps.
                ParamsModifier(
                    cache_config=DBCacheConfig().reset(
                        max_warmup_steps=2,
                        max_cached_steps=20,
                    ),
                ),
            ],
            # CFG negative branch is cached separately from the positive one.
            has_separate_cfg=True,
        ),
    )
def _diffusers_version_at_least(major: int, minor: int) -> bool:
    """Return True if the installed diffusers release is >= ``major.minor``.

    Compares parsed integer components instead of raw strings: a plain
    string comparison such as ``diffusers.__version__ >= "0.34.0"``
    misorders versions lexicographically (e.g. "0.100.0" < "0.34.0").
    Non-numeric suffixes like "dev0" are ignored.
    """
    parts = []
    for piece in diffusers.__version__.split(".")[:2]:
        digits = "".join(ch for ch in piece if ch.isdigit())
        parts.append(int(digits) if digits else 0)
    return tuple(parts) >= (major, minor)


# Wan currently requires installing diffusers from source
assert isinstance(pipe.vae, AutoencoderKLWan)  # enable type check for IDE
if _diffusers_version_at_least(0, 34):
    # Tiled/sliced VAE decode keeps peak memory bounded for long videos.
    pipe.vae.enable_tiling()
    pipe.vae.enable_slicing()
else:
    print(
        "Wan pipeline requires diffusers version >= 0.34.0 "
        "for vae tiling and slicing, please install diffusers "
        "from source."
    )

assert isinstance(pipe.transformer, WanTransformer3DModel)
assert isinstance(pipe.transformer_2, WanTransformer3DModel)
if args.quantize:
    assert isinstance(args.quantize_type, str)
    # NOTE(review): nesting reconstructed from a whitespace-stripped paste --
    # the "wo" branch quantizes the high-noise transformer weight-only,
    # while transformer_2 is quantized unconditionally; confirm against the
    # upstream repo.
    if args.quantize_type.endswith("wo"):  # weight only
        pipe.transformer = cache_dit.quantize(
            pipe.transformer,
            quant_type=args.quantize_type,
        )
    # We only apply activation quantization (default: FP8 DQ)
    # for low-noise transformer to avoid non-trivial precision
    # downgrade.
    pipe.transformer_2 = cache_dit.quantize(
        pipe.transformer_2,
        quant_type=args.quantize_type,
    )
# Reference input frame fetched from the Hub test-image dataset.
image = load_image(
    "https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/wan_i2v_input.JPG"
)

# Fit the image into a ~480x832 pixel budget while keeping its aspect
# ratio; both sides are rounded down to multiples of the model's spatial
# granularity (VAE spatial scale * transformer patch size).
max_area = 480 * 832
aspect_ratio = image.height / image.width
mod_value = (
    pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
)
height = round(np.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(np.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
image = image.resize((width, height))

prompt = "Summer beach vacation style, a white cat wearing sunglasses sits on a surfboard. The fluffy-furred feline gazes directly at the camera with a relaxed expression. Blurred beach scenery forms the background featuring crystal-clear waters, distant green hills, and a blue sky dotted with white clouds. The cat assumes a naturally relaxed posture, as if savoring the sea breeze and warm sunlight. A close-up shot highlights the feline's intricate details and the refreshing atmosphere of the seaside."
# Standard Wan Chinese negative prompt (quality/artifact suppression);
# kept verbatim -- it is a runtime string consumed by the text encoder.
negative_prompt = "色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走"
def run_pipe():
    """Run one image-to-video generation pass and return its frame list.

    Uses the module-level `pipe`, `image`, `prompt`, `negative_prompt`,
    `height` and `width`. Seeded with a fixed CPU generator so repeated
    calls are reproducible.
    """
    rng = torch.Generator(device="cpu").manual_seed(0)
    output = pipe(
        image=image,
        prompt=prompt,
        negative_prompt=negative_prompt,
        height=height,
        width=width,
        num_frames=81,  # pipe.vae_scale_factor_temporal=4
        guidance_scale=3.5,
        num_inference_steps=50,
        generator=rng,
    )
    return output.frames[0]
if args.compile or args.quantize:
    # torch.compile each repeated transformer block (fullgraph) for both
    # experts; also applied after quantization.
    cache_dit.set_compile_configs()
    pipe.transformer.compile_repeated_blocks(fullgraph=True)
    pipe.transformer_2.compile_repeated_blocks(fullgraph=True)
    # warmup
    # NOTE(review): warmup placement reconstructed from a whitespace-stripped
    # paste -- assumed inside the compile/quantize branch (it triggers the
    # one-time compilation cost); confirm against the upstream repo.
    run_pipe()

# Timed run (excludes model load and any compile warmup above).
start = time.time()
video = run_pipe()
end = time.time()

# Print per-transformer cache statistics collected by cache-dit.
cache_dit.summary(pipe, details=True)

time_cost = end - start
save_path = (
    f"wan2.2-i2v.frame{len(video)}.{height}x{width}.{strify(args, pipe)}.mp4"
)
print(f"Time cost: {time_cost:.2f}s")
print(f"Saving video to {save_path}")
export_to_video(video, save_path, fps=16)