1 file changed (+8 −0 lines)

@@ -109,6 +109,7 @@
     "autoencoder-dc-sana": "encoder.project_in.conv.bias",
     "mochi-1-preview": ["model.diffusion_model.blocks.0.attn.qkv_x.weight", "blocks.0.attn.qkv_x.weight"],
     "hunyuan-video": "txt_in.individual_token_refiner.blocks.0.adaLN_modulation.1.bias",
+    "instruct-pix2pix": "model.diffusion_model.input_blocks.0.0.weight",
 }
 
 DIFFUSERS_DEFAULT_PIPELINE_PATHS = {
@@ -164,6 +165,7 @@
     "autoencoder-dc-f32c32-sana": {"pretrained_model_name_or_path": "mit-han-lab/dc-ae-f32c32-sana-1.0-diffusers"},
     "mochi-1-preview": {"pretrained_model_name_or_path": "genmo/mochi-1-preview"},
     "hunyuan-video": {"pretrained_model_name_or_path": "hunyuanvideo-community/HunyuanVideo"},
+    "instruct-pix2pix": {"pretrained_model_name_or_path": "timbrooks/instruct-pix2pix"},
 }
 
 # Use to configure model sample size when original config is provided
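
Taken together, the two registries drive single-file loading: the key added to CHECKPOINT_KEY_NAMES is the fingerprint that identifies a checkpoint's model type, and DIFFUSERS_DEFAULT_PIPELINE_PATHS maps that type to the Hub repo whose diffusers-format config is reused. A minimal sketch of that lookup (fetch_default_repo below is an illustrative helper, not a diffusers API):

# Illustrative sketch only; fetch_default_repo is hypothetical. It shows how the
# detected model type selects the default config repo for a single-file checkpoint.
def fetch_default_repo(checkpoint: dict) -> str:
    model_type = infer_diffusers_model_type(checkpoint)   # e.g. "instruct-pix2pix"
    repo = DIFFUSERS_DEFAULT_PIPELINE_PATHS[model_type]
    return repo["pretrained_model_name_or_path"]          # "timbrooks/instruct-pix2pix"

The detection itself gains a new branch in infer_diffusers_model_type:
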
@@ -629,6 +631,12 @@ def infer_diffusers_model_type(checkpoint):
     elif CHECKPOINT_KEY_NAMES["hunyuan-video"] in checkpoint:
         model_type = "hunyuan-video"
 
+    elif (
+        CHECKPOINT_KEY_NAMES["instruct-pix2pix"] in checkpoint
+        and checkpoint[CHECKPOINT_KEY_NAMES["instruct-pix2pix"]].shape[1] == 8
+    ):
+        model_type = "instruct-pix2pix"
+
     else:
         model_type = "v1"
 
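
The shape check is what separates InstructPix2Pix from a plain SD v1 checkpoint: both contain model.diffusion_model.input_blocks.0.0.weight, but the InstructPix2Pix UNet's first convolution takes 8 input channels (the 4 noisy-latent channels concatenated with the 4 latent channels of the encoded input image), while a standard v1 UNet takes 4, so shape[1] == 8 disambiguates it before the generic "v1" fallback. With this branch in place, a single-file InstructPix2Pix checkpoint should resolve to the timbrooks/instruct-pix2pix config automatically. A hedged usage sketch (the checkpoint path is a placeholder, and this assumes StableDiffusionInstructPix2PixPipeline exposes from_single_file via the usual single-file mixin):

from diffusers import StableDiffusionInstructPix2PixPipeline

# Placeholder path; point this at a real single-file InstructPix2Pix checkpoint.
pipe = StableDiffusionInstructPix2PixPipeline.from_single_file(
    "path/to/instruct-pix2pix.safetensors"
)
# The loader detects model_type == "instruct-pix2pix" from the 8-channel conv_in
# weight and pulls the matching config from "timbrooks/instruct-pix2pix".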