Skip to content

Commit bdf3474

Browse files
feat(mm): fix clip vision starter model bases, add ref to actual models
1 parent 16570df commit bdf3474

File tree

1 file changed

+6
-2
lines changed

1 file changed

+6
-2
lines changed

invokeai/backend/model_manager/starter_models.py

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -37,16 +37,20 @@ class StarterModelBundle(BaseModel):
3737
)
3838

3939
# region CLIP Image Encoders
40+
41+
# This is CLIP-ViT-H-14-laion2B-s32B-b79K
4042
ip_adapter_sd_image_encoder = StarterModel(
4143
name="IP Adapter SD1.5 Image Encoder",
42-
base=BaseModelType.StableDiffusion1,
44+
base=BaseModelType.Any,
4345
source="InvokeAI/ip_adapter_sd_image_encoder",
4446
description="IP Adapter SD Image Encoder",
4547
type=ModelType.CLIPVision,
4648
)
49+
50+
# This is CLIP-ViT-bigG-14-laion2B-39B-b160k
4751
ip_adapter_sdxl_image_encoder = StarterModel(
4852
name="IP Adapter SDXL Image Encoder",
49-
base=BaseModelType.StableDiffusionXL,
53+
base=BaseModelType.Any,
5054
source="InvokeAI/ip_adapter_sdxl_image_encoder",
5155
description="IP Adapter SDXL Image Encoder",
5256
type=ModelType.CLIPVision,

0 commit comments

Comments (0)