
Commit 58bca1b

feat(nodes): use new ui_model_[base|type|variant] on all core nodes
1 parent 54aa690

16 files changed: 106 additions & 50 deletions
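
Every hunk below applies the same mechanical change: a model-picker field that previously narrowed the UI's model list with a single `ui_type=UIType.*` enum member now declares its filter from the model taxonomy via the new `ui_model_base` / `ui_model_type` parameters on `InputField` (the commit title also mentions `ui_model_variant`, though it does not appear in the hunks shown here). A minimal sketch of the pattern, assuming only imports that appear in these diffs; the class name is hypothetical and the node is deliberately left unregistered:

# Hypothetical node illustrating the migration; not a real InvokeAI node.
from invokeai.app.invocations.baseinvocation import BaseInvocation
from invokeai.app.invocations.fields import FieldDescriptions, InputField
from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType


class ExampleFluxModelPicker(BaseInvocation):
    """Sketch only: no @invocation decorator, so it is never registered."""

    # Before this commit, the filter was one opaque enum member:
    #     ui_type=UIType.FluxMainModel
    # After, base and type are declared separately from the taxonomy:
    model: ModelIdentifierField = InputField(
        description=FieldDescriptions.flux_model,
        ui_model_base=BaseModelType.Flux,  # limit the picker to FLUX-family models
        ui_model_type=ModelType.Main,      # ...of the main (pipeline) model type
    )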

invokeai/app/invocations/cogview4_model_loader.py

Lines changed: 4 additions & 2 deletions
@@ -5,7 +5,7 @@
     invocation,
     invocation_output,
 )
-from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
+from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField
 from invokeai.app.invocations.model import (
     GlmEncoderField,
     ModelIdentifierField,
@@ -14,6 +14,7 @@
 )
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.model_manager.config import SubModelType
+from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType


 @invocation_output("cogview4_model_loader_output")
@@ -38,8 +39,9 @@ class CogView4ModelLoaderInvocation(BaseInvocation):

     model: ModelIdentifierField = InputField(
         description=FieldDescriptions.cogview4_model,
-        ui_type=UIType.CogView4MainModel,
         input=Input.Direct,
+        ui_model_base=BaseModelType.CogView4,
+        ui_model_type=ModelType.Main,
     )

     def invoke(self, context: InvocationContext) -> CogView4ModelLoaderOutput:

invokeai/app/invocations/controlnet.py

Lines changed: 4 additions & 2 deletions
@@ -16,7 +16,6 @@
     ImageField,
     InputField,
     OutputField,
-    UIType,
 )
 from invokeai.app.invocations.model import ModelIdentifierField
 from invokeai.app.invocations.primitives import ImageOutput
@@ -28,6 +27,7 @@
     heuristic_resize_fast,
 )
 from invokeai.backend.image_util.util import np_to_pil, pil_to_np
+from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType


 class ControlField(BaseModel):
@@ -69,7 +69,9 @@ class ControlNetInvocation(BaseInvocation):

     image: ImageField = InputField(description="The control image")
     control_model: ModelIdentifierField = InputField(
-        description=FieldDescriptions.controlnet_model, ui_type=UIType.ControlNetModel
+        description=FieldDescriptions.controlnet_model,
+        ui_model_base=[BaseModelType.StableDiffusion1, BaseModelType.StableDiffusionXL],
+        ui_model_type=ModelType.ControlNet,
     )
     control_weight: Union[float, List[float]] = InputField(
         default=1.0, ge=-1, le=2, description="The weight given to the ControlNet"
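
Note the list form here: unlike the old `UIType.ControlNetModel`, which could not distinguish bases, `ui_model_base` accepts either a single `BaseModelType` or a list of them. This SD ControlNet picker therefore lists both SD1 and SDXL ControlNets, while the FLUX ControlNet node below passes the single value `BaseModelType.Flux`.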

invokeai/app/invocations/flux_control_lora_loader.py

Lines changed: 6 additions & 2 deletions
@@ -4,9 +4,10 @@
     invocation,
     invocation_output,
 )
-from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, OutputField, UIType
+from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, OutputField
 from invokeai.app.invocations.model import ControlLoRAField, ModelIdentifierField
 from invokeai.app.services.shared.invocation_context import InvocationContext
+from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType


 @invocation_output("flux_control_lora_loader_output")
@@ -29,7 +30,10 @@ class FluxControlLoRALoaderInvocation(BaseInvocation):
     """LoRA model and Image to use with FLUX transformer generation."""

     lora: ModelIdentifierField = InputField(
-        description=FieldDescriptions.control_lora_model, title="Control LoRA", ui_type=UIType.ControlLoRAModel
+        description=FieldDescriptions.control_lora_model,
+        title="Control LoRA",
+        ui_model_base=BaseModelType.Flux,
+        ui_model_type=ModelType.ControlLoRa,
     )
     image: ImageField = InputField(description="The image to encode.")
     weight: float = InputField(description="The weight of the LoRA.", default=1.0)

invokeai/app/invocations/flux_controlnet.py

Lines changed: 5 additions & 2 deletions
@@ -6,11 +6,12 @@
     invocation,
     invocation_output,
 )
-from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, OutputField, UIType
+from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, OutputField
 from invokeai.app.invocations.model import ModelIdentifierField
 from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.app.util.controlnet_utils import CONTROLNET_RESIZE_VALUES
+from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType


 class FluxControlNetField(BaseModel):
@@ -57,7 +58,9 @@ class FluxControlNetInvocation(BaseInvocation):

     image: ImageField = InputField(description="The control image")
     control_model: ModelIdentifierField = InputField(
-        description=FieldDescriptions.controlnet_model, ui_type=UIType.ControlNetModel
+        description=FieldDescriptions.controlnet_model,
+        ui_model_base=BaseModelType.Flux,
+        ui_model_type=ModelType.ControlNet,
     )
     control_weight: float | list[float] = InputField(
         default=1.0, ge=-1, le=2, description="The weight given to the ControlNet"

invokeai/app/invocations/flux_ip_adapter.py

Lines changed: 6 additions & 2 deletions
@@ -5,7 +5,7 @@
 from typing_extensions import Self

 from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
-from invokeai.app.invocations.fields import InputField, UIType
+from invokeai.app.invocations.fields import InputField
 from invokeai.app.invocations.ip_adapter import (
     CLIP_VISION_MODEL_MAP,
     IPAdapterField,
@@ -20,6 +20,7 @@
     IPAdapterCheckpointConfig,
     IPAdapterInvokeAIConfig,
 )
+from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType


 @invocation(
@@ -36,7 +37,10 @@ class FluxIPAdapterInvocation(BaseInvocation):

     image: ImageField = InputField(description="The IP-Adapter image prompt(s).")
     ip_adapter_model: ModelIdentifierField = InputField(
-        description="The IP-Adapter model.", title="IP-Adapter Model", ui_type=UIType.IPAdapterModel
+        description="The IP-Adapter model.",
+        title="IP-Adapter Model",
+        ui_model_base=BaseModelType.Flux,
+        ui_model_type=ModelType.IPAdapter,
     )
     # Currently, the only known ViT model used by FLUX IP-Adapters is ViT-L.
     clip_vision_model: Literal["ViT-L"] = InputField(description="CLIP Vision model to use.", default="ViT-L")

invokeai/app/invocations/flux_lora_loader.py

Lines changed: 6 additions & 3 deletions
@@ -6,10 +6,10 @@
     invocation,
     invocation_output,
 )
-from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
+from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField
 from invokeai.app.invocations.model import CLIPField, LoRAField, ModelIdentifierField, T5EncoderField, TransformerField
 from invokeai.app.services.shared.invocation_context import InvocationContext
-from invokeai.backend.model_manager.taxonomy import BaseModelType
+from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType


 @invocation_output("flux_lora_loader_output")
@@ -36,7 +36,10 @@ class FluxLoRALoaderInvocation(BaseInvocation):
     """Apply a LoRA model to a FLUX transformer and/or text encoder."""

     lora: ModelIdentifierField = InputField(
-        description=FieldDescriptions.lora_model, title="LoRA", ui_type=UIType.LoRAModel
+        description=FieldDescriptions.lora_model,
+        title="LoRA",
+        ui_model_base=BaseModelType.Flux,
+        ui_model_type=ModelType.LoRA,
     )
     weight: float = InputField(default=0.75, description=FieldDescriptions.lora_weight)
     transformer: TransformerField | None = InputField(

invokeai/app/invocations/flux_model_loader.py

Lines changed: 13 additions & 6 deletions
@@ -6,7 +6,7 @@
     invocation,
     invocation_output,
 )
-from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
+from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField
 from invokeai.app.invocations.model import CLIPField, ModelIdentifierField, T5EncoderField, TransformerField, VAEField
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.app.util.t5_model_identifier import (
@@ -17,7 +17,7 @@
 from invokeai.backend.model_manager.config import (
     CheckpointConfigBase,
 )
-from invokeai.backend.model_manager.taxonomy import SubModelType
+from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType, SubModelType


 @invocation_output("flux_model_loader_output")
@@ -46,23 +46,30 @@ class FluxModelLoaderInvocation(BaseInvocation):

     model: ModelIdentifierField = InputField(
         description=FieldDescriptions.flux_model,
-        ui_type=UIType.FluxMainModel,
         input=Input.Direct,
+        ui_model_base=BaseModelType.Flux,
+        ui_model_type=ModelType.Main,
     )

     t5_encoder_model: ModelIdentifierField = InputField(
-        description=FieldDescriptions.t5_encoder, ui_type=UIType.T5EncoderModel, input=Input.Direct, title="T5 Encoder"
+        description=FieldDescriptions.t5_encoder,
+        input=Input.Direct,
+        title="T5 Encoder",
+        ui_model_type=ModelType.T5Encoder,
     )

     clip_embed_model: ModelIdentifierField = InputField(
         description=FieldDescriptions.clip_embed_model,
-        ui_type=UIType.CLIPEmbedModel,
         input=Input.Direct,
         title="CLIP Embed",
+        ui_model_type=ModelType.CLIPEmbed,
     )

     vae_model: ModelIdentifierField = InputField(
-        description=FieldDescriptions.vae_model, ui_type=UIType.FluxVAEModel, title="VAE"
+        description=FieldDescriptions.vae_model,
+        title="VAE",
+        ui_model_base=BaseModelType.Flux,
+        ui_model_type=ModelType.VAE,
     )

     def invoke(self, context: InvocationContext) -> FluxModelLoaderOutput:
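
Also worth noting in this file: `ui_model_base` is optional. The T5 encoder and CLIP embed fields filter on `ui_model_type` alone, matching the base-agnostic `UIType.T5EncoderModel` and `UIType.CLIPEmbedModel` members they replace.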

invokeai/app/invocations/flux_redux.py

Lines changed: 2 additions & 2 deletions
@@ -18,7 +18,6 @@
     InputField,
     OutputField,
     TensorField,
-    UIType,
 )
 from invokeai.app.invocations.model import ModelIdentifierField
 from invokeai.app.invocations.primitives import ImageField
@@ -64,7 +63,8 @@ class FluxReduxInvocation(BaseInvocation):
     redux_model: ModelIdentifierField = InputField(
         description="The FLUX Redux model to use.",
         title="FLUX Redux Model",
-        ui_type=UIType.FluxReduxModel,
+        ui_model_base=BaseModelType.Flux,
+        ui_model_type=ModelType.FluxRedux,
     )
     downsampling_factor: int = InputField(
         ge=1,

invokeai/app/invocations/ip_adapter.py

Lines changed: 3 additions & 2 deletions
@@ -5,7 +5,7 @@
 from typing_extensions import Self

 from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
-from invokeai.app.invocations.fields import FieldDescriptions, InputField, OutputField, TensorField, UIType
+from invokeai.app.invocations.fields import FieldDescriptions, InputField, OutputField, TensorField
 from invokeai.app.invocations.model import ModelIdentifierField
 from invokeai.app.invocations.primitives import ImageField
 from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
@@ -85,7 +85,8 @@ class IPAdapterInvocation(BaseInvocation):
         description="The IP-Adapter model.",
         title="IP-Adapter Model",
         ui_order=-1,
-        ui_type=UIType.IPAdapterModel,
+        ui_model_base=[BaseModelType.StableDiffusion1, BaseModelType.StableDiffusionXL],
+        ui_model_type=ModelType.IPAdapter,
     )
     clip_vision_model: Literal["ViT-H", "ViT-G", "ViT-L"] = InputField(
         description="CLIP Vision model to use. Overrides model settings. Mandatory for checkpoint models.",

invokeai/app/invocations/llava_onevision_vllm.py

Lines changed: 3 additions & 2 deletions
@@ -6,11 +6,12 @@
 from transformers import AutoProcessor, LlavaOnevisionForConditionalGeneration, LlavaOnevisionProcessor

 from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
-from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, UIComponent, UIType
+from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, UIComponent
 from invokeai.app.invocations.model import ModelIdentifierField
 from invokeai.app.invocations.primitives import StringOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.llava_onevision_pipeline import LlavaOnevisionPipeline
+from invokeai.backend.model_manager.taxonomy import ModelType
 from invokeai.backend.util.devices import TorchDevice


@@ -34,7 +35,7 @@ class LlavaOnevisionVllmInvocation(BaseInvocation):
     vllm_model: ModelIdentifierField = InputField(
         title="LLaVA Model Type",
         description=FieldDescriptions.vllm_model,
-        ui_type=UIType.LlavaOnevisionModel,
+        ui_model_type=ModelType.LlavaOnevision,
     )

     @field_validator("images", mode="before")
