
Commit bf1fdc8: "with new attn"
1 parent: 8d8ed8b

File tree: 3 files changed, +417 -35 lines

Lines changed: 384 additions & 0 deletions
@@ -0,0 +1,384 @@
"""
Convert a CogView4 checkpoint to the Diffusers format.

This script converts a CogView4 checkpoint to the Diffusers format, which can then be used
with the Diffusers library.

Example usage:
    python scripts/convert_cogview4_to_diffusers.py \
        --transformer_checkpoint_path 'your path/cogview4_6b/1/mp_rank_00_model_states.pt' \
        --vae_checkpoint_path 'your path/cogview4_6b/imagekl_ch16.pt' \
        --output_path "THUDM/CogView4-6B" \
        --dtype "bf16"

Arguments:
    --transformer_checkpoint_path: Path to the Transformer state dict.
    --vae_checkpoint_path: Path to the VAE state dict.
    --output_path: The path to save the converted model.
    --push_to_hub: Whether to push the converted checkpoint to the HF Hub or not. Defaults to `False`.
    --text_encoder_cache_dir: Cache directory where the text encoder is located. Defaults to None, which means HF_HOME will be used.
    --dtype: The dtype to save the model in (default: "bf16", options: "fp16", "bf16", "fp32"). If None, the dtype of the state dict is used.

The default is "bf16" because CogView4 uses bfloat16 for training.

Note: You must provide either --transformer_checkpoint_path or --vae_checkpoint_path.
"""

import argparse
from contextlib import nullcontext

import torch
from transformers import PreTrainedTokenizerFast, GlmForCausalLM
from tqdm import tqdm

from diffusers import (
    AutoencoderKL,
    CogView4DDIMScheduler,
    CogView4Pipeline,
    CogView4Transformer2DModel,
)
from diffusers.loaders.single_file_utils import convert_ldm_vae_checkpoint

parser = argparse.ArgumentParser()
parser.add_argument(
    "--transformer_checkpoint_path",
    default=None,
    type=str,
    help="Path to Megatron (not SAT) Transformer checkpoint, e.g., 'model_optim_rng.pt'.",
)
parser.add_argument(
    "--vae_checkpoint_path",
    default=None,
    type=str,
    help="(Optional) Path to VAE checkpoint, e.g., 'imagekl_ch16.pt'.",
)
parser.add_argument(
    "--output_path",
    required=True,
    type=str,
    help="Directory to save the final Diffusers format pipeline.",
)
parser.add_argument(
    "--push_to_hub",
    action="store_true",
    default=False,
    help="Whether to push the converted model to the HuggingFace Hub.",
)
parser.add_argument(
    "--text_encoder_cache_dir",
    type=str,
    default=None,
    help="Specify the cache directory for the text encoder.",
)
parser.add_argument(
    "--dtype",
    type=str,
    default="bf16",
    choices=["fp16", "bf16", "fp32"],
    help="Data type to save the model in.",
)

parser.add_argument(
    "--num_layers",
    type=int,
    default=28,
    help="Number of Transformer layers (e.g., 28, 48...).",
)
parser.add_argument(
    "--num_heads",
    type=int,
    default=32,
    help="Number of attention heads.",
)
parser.add_argument(
    "--hidden_size",
    type=int,
    default=4096,
    help="Transformer hidden dimension size.",
)
parser.add_argument(
    "--attention_head_dim",
    type=int,
    default=128,
    help="Dimension of each attention head.",
)
parser.add_argument(
    "--time_embed_dim",
    type=int,
    default=512,
    help="Dimension of time embeddings.",
)
parser.add_argument(
    "--condition_dim",
    type=int,
    default=256,
    help="Dimension of condition embeddings.",
)
parser.add_argument(
    "--pos_embed_max_size",
    type=int,
    default=128,
    help="Maximum size for positional embeddings.",
)

args = parser.parse_args()


def swap_scale_shift(weight, dim):
    """
    Swap the scale and shift components in the weight tensor.

    Args:
        weight (torch.Tensor): The original weight tensor.
        dim (int): The dimension along which to split.

    Returns:
        torch.Tensor: The modified weight tensor with scale and shift swapped.
    """
    shift, scale = weight.chunk(2, dim=dim)
    new_weight = torch.cat([scale, shift], dim=dim)
    return new_weight
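The checkpoint stores the AdaLN modulation parameters with the shift half first and the scale half second; `swap_scale_shift` simply swaps the two halves so the tensor matches the order expected by the Diffusers keys. A toy-sized sanity check (illustrative only, not part of the script; it assumes the function above is in scope):

import torch

w = torch.tensor([0.0, 1.0, 2.0, 3.0])  # first half: shift, second half: scale
print(swap_scale_shift(w, dim=0))       # tensor([2., 3., 0., 1.])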
def convert_megatron_transformer_checkpoint_to_diffusers(
    ckpt_path: str,
    num_layers: int,
    num_heads: int,
    hidden_size: int,
):
    """
    Convert a Megatron Transformer checkpoint to Diffusers format.

    Args:
        ckpt_path (str): Path to the Megatron Transformer checkpoint.
        num_layers (int): Number of Transformer layers.
        num_heads (int): Number of attention heads.
        hidden_size (int): Hidden size of the Transformer.

    Returns:
        dict: The converted state dictionary compatible with Diffusers.
    """
    ckpt = torch.load(ckpt_path, map_location="cpu")
    mega = ckpt["model"]

    new_state_dict = {}

    # Patch Embedding
    new_state_dict["patch_embed.proj.weight"] = mega["encoder_expand_linear.weight"].reshape(hidden_size, 64)
    new_state_dict["patch_embed.proj.bias"] = mega["encoder_expand_linear.bias"]
    new_state_dict["patch_embed.text_proj.weight"] = mega["text_projector.weight"]
    new_state_dict["patch_embed.text_proj.bias"] = mega["text_projector.bias"]

    # Time Condition Embedding
    new_state_dict["time_condition_embed.timestep_embedder.linear_1.weight"] = mega[
        "time_embedding.time_embed.0.weight"
    ]
    new_state_dict["time_condition_embed.timestep_embedder.linear_1.bias"] = mega["time_embedding.time_embed.0.bias"]
    new_state_dict["time_condition_embed.timestep_embedder.linear_2.weight"] = mega[
        "time_embedding.time_embed.2.weight"
    ]
    new_state_dict["time_condition_embed.timestep_embedder.linear_2.bias"] = mega["time_embedding.time_embed.2.bias"]

    new_state_dict["time_condition_embed.condition_embedder.linear_1.weight"] = mega[
        "label_embedding.label_embed.0.weight"
    ]
    new_state_dict["time_condition_embed.condition_embedder.linear_1.bias"] = mega[
        "label_embedding.label_embed.0.bias"
    ]
    new_state_dict["time_condition_embed.condition_embedder.linear_2.weight"] = mega[
        "label_embedding.label_embed.2.weight"
    ]
    new_state_dict["time_condition_embed.condition_embedder.linear_2.bias"] = mega[
        "label_embedding.label_embed.2.bias"
    ]

    # Convert each Transformer layer
    for i in tqdm(range(num_layers), desc="Converting layers (Megatron->Diffusers)"):
        block_prefix = f"transformer_blocks.{i}."

        # AdaLayerNorm
        new_state_dict[block_prefix + "norm1.linear.weight"] = swap_scale_shift(
            mega[f"decoder.layers.{i}.adaln.weight"], dim=0
        )
        new_state_dict[block_prefix + "norm1.linear.bias"] = swap_scale_shift(
            mega[f"decoder.layers.{i}.adaln.bias"], dim=0
        )

        # QKV
        qkv_weight = mega[f"decoder.layers.{i}.self_attention.linear_qkv.weight"]
        qkv_bias = mega[f"decoder.layers.{i}.self_attention.linear_qkv.bias"]

        # Reshape to match SAT logic
        qkv_weight = qkv_weight.view(num_heads, 3, hidden_size // num_heads, hidden_size)
        qkv_weight = qkv_weight.permute(1, 0, 2, 3).reshape(3 * hidden_size, hidden_size)

        qkv_bias = qkv_bias.view(num_heads, 3, hidden_size // num_heads)
        qkv_bias = qkv_bias.permute(1, 0, 2).reshape(3 * hidden_size)

        # Assign to Diffusers keys
        q, k, v = torch.chunk(qkv_weight, 3, dim=0)
        qb, kb, vb = torch.chunk(qkv_bias, 3, dim=0)

        new_state_dict[block_prefix + "attn1.to_q.weight"] = q
        new_state_dict[block_prefix + "attn1.to_q.bias"] = qb
        new_state_dict[block_prefix + "attn1.to_k.weight"] = k
        new_state_dict[block_prefix + "attn1.to_k.bias"] = kb
        new_state_dict[block_prefix + "attn1.to_v.weight"] = v
        new_state_dict[block_prefix + "attn1.to_v.bias"] = vb

        # Attention Output
        new_state_dict[block_prefix + "attn1.to_out.0.weight"] = mega[
            f"decoder.layers.{i}.self_attention.linear_proj.weight"
        ].T
        new_state_dict[block_prefix + "attn1.to_out.0.bias"] = mega[
            f"decoder.layers.{i}.self_attention.linear_proj.bias"
        ]

        # MLP
        new_state_dict[block_prefix + "ff.net.0.proj.weight"] = mega[f"decoder.layers.{i}.mlp.linear_fc1.weight"]
        new_state_dict[block_prefix + "ff.net.0.proj.bias"] = mega[f"decoder.layers.{i}.mlp.linear_fc1.bias"]
        new_state_dict[block_prefix + "ff.net.2.weight"] = mega[f"decoder.layers.{i}.mlp.linear_fc2.weight"]
        new_state_dict[block_prefix + "ff.net.2.bias"] = mega[f"decoder.layers.{i}.mlp.linear_fc2.bias"]

    # Final Layers
    new_state_dict["norm_out.linear.weight"] = swap_scale_shift(mega["adaln_final.weight"], dim=0)
    new_state_dict["norm_out.linear.bias"] = swap_scale_shift(mega["adaln_final.bias"], dim=0)
    new_state_dict["proj_out.weight"] = mega["output_projector.weight"]
    new_state_dict["proj_out.bias"] = mega["output_projector.bias"]

    return new_state_dict
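The QKV handling above is the only non-trivial remapping: Megatron stores query, key, and value fused into one `linear_qkv` matrix laid out per attention head as (num_heads, 3, head_dim, hidden_size), while the Diffusers keys expect three separate `to_q`/`to_k`/`to_v` projections. A shape-only sketch of that regrouping with toy dimensions (illustrative only; the real checkpoint uses num_heads=32, hidden_size=4096):

import torch

num_heads, hidden_size = 2, 8
head_dim = hidden_size // num_heads

# Fused per-head [q, k, v] rows, as in the Megatron layout
fused = torch.randn(3 * hidden_size, hidden_size)

# (num_heads, 3, head_dim, hidden) -> all Q rows, then all K rows, then all V rows
regrouped = fused.view(num_heads, 3, head_dim, hidden_size).permute(1, 0, 2, 3).reshape(3 * hidden_size, hidden_size)
q, k, v = torch.chunk(regrouped, 3, dim=0)
assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)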
def convert_cogview4_vae_checkpoint_to_diffusers(ckpt_path, vae_config):
    """
    Convert a CogView4 VAE checkpoint to Diffusers format.

    Args:
        ckpt_path (str): Path to the VAE checkpoint.
        vae_config (dict): Configuration dictionary for the VAE.

    Returns:
        dict: The converted VAE state dictionary compatible with Diffusers.
    """
    original_state_dict = torch.load(ckpt_path, map_location="cpu")["state_dict"]
    return convert_ldm_vae_checkpoint(original_state_dict, vae_config)


def main(args):
    """
    Main function to convert CogView4 checkpoints to Diffusers format.

    Args:
        args (argparse.Namespace): Parsed command-line arguments.
    """
    # Determine the desired data type
    if args.dtype == "fp16":
        dtype = torch.float16
    elif args.dtype == "bf16":
        dtype = torch.bfloat16
    elif args.dtype == "fp32":
        dtype = torch.float32
    else:
        raise ValueError(f"Unsupported dtype: {args.dtype}")

    transformer = None
    vae = None

    # Convert Transformer checkpoint if provided
    if args.transformer_checkpoint_path is not None:
        converted_transformer_state_dict = convert_megatron_transformer_checkpoint_to_diffusers(
            ckpt_path=args.transformer_checkpoint_path,
            num_layers=args.num_layers,
            num_heads=args.num_heads,
            hidden_size=args.hidden_size,
        )
        transformer = CogView4Transformer2DModel(
            patch_size=2,
            in_channels=16,
            num_layers=args.num_layers,
            attention_head_dim=args.attention_head_dim,
            num_attention_heads=args.num_heads,
            out_channels=16,
            text_embed_dim=args.hidden_size,
            time_embed_dim=args.time_embed_dim,
            condition_dim=args.condition_dim,
            pos_embed_max_size=args.pos_embed_max_size,
        )

        transformer.load_state_dict(converted_transformer_state_dict, strict=True)

        # Convert to the specified dtype
        if dtype is not None:
            transformer = transformer.to(dtype=dtype)

    # Convert VAE checkpoint if provided
    if args.vae_checkpoint_path is not None:
        vae_config = {
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ("DownEncoderBlock2D",) * 4,
            "up_block_types": ("UpDecoderBlock2D",) * 4,
            "block_out_channels": (128, 512, 1024, 1024),
            "layers_per_block": 3,
            "act_fn": "silu",
            "latent_channels": 16,
            "norm_num_groups": 32,
            "sample_size": 1024,
            "scaling_factor": 1.0,
            "force_upcast": True,
            "use_quant_conv": False,
            "use_post_quant_conv": False,
            "mid_block_add_attention": False,
        }
        converted_vae_state_dict = convert_cogview4_vae_checkpoint_to_diffusers(args.vae_checkpoint_path, vae_config)
        vae = AutoencoderKL(**vae_config)
        vae.load_state_dict(converted_vae_state_dict, strict=True)
        if dtype is not None:
            vae = vae.to(dtype=dtype)

    # Load the text encoder and tokenizer
    text_encoder_id = "/share/home/zyx/Models/glm-4-9b-hf"
    tokenizer = PreTrainedTokenizerFast.from_pretrained(text_encoder_id)
    text_encoder = GlmForCausalLM.from_pretrained(
        text_encoder_id,
        cache_dir=args.text_encoder_cache_dir,
        torch_dtype=torch.bfloat16 if args.dtype == "bf16" else torch.float32,
    )
    for param in text_encoder.parameters():
        param.data = param.data.contiguous()

    # Initialize the scheduler
    scheduler = CogView4DDIMScheduler.from_config(
        {
            "shift_scale": 1.0,
            "beta_end": 0.012,
            "beta_schedule": "scaled_linear",
            "beta_start": 0.00085,
            "clip_sample": False,
            "num_train_timesteps": 1000,
            "prediction_type": "v_prediction",
            "rescale_betas_zero_snr": True,
            "set_alpha_to_one": True,
            "timestep_spacing": "linspace",
        }
    )

    # Create the pipeline
    pipe = CogView4Pipeline(
        tokenizer=tokenizer,
        text_encoder=text_encoder,
        vae=vae,
        transformer=transformer,
        scheduler=scheduler,
    )

    # Save the converted pipeline
    pipe.save_pretrained(
        args.output_path,
        safe_serialization=True,
        max_shard_size="5GB",
        push_to_hub=args.push_to_hub,
    )


if __name__ == "__main__":
    main(args)
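Once the script has written the pipeline to --output_path, it should be loadable like any other Diffusers pipeline. A minimal sketch (the output path, device, and prompt are placeholders, and the generation arguments are assumed to follow the usual Diffusers text-to-image API; they may differ for the final CogView4 release):

import torch
from diffusers import CogView4Pipeline

# Load the converted pipeline from the directory produced by this script
pipe = CogView4Pipeline.from_pretrained("path/to/converted/CogView4-6B", torch_dtype=torch.bfloat16)
pipe.to("cuda")

image = pipe(prompt="a photo of a red panda").images[0]
image.save("output.png")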
