diff --git a/invokeai/app/api/routers/model_manager.py b/invokeai/app/api/routers/model_manager.py index 6142239cf65..ba84339a0e6 100644 --- a/invokeai/app/api/routers/model_manager.py +++ b/invokeai/app/api/routers/model_manager.py @@ -28,10 +28,12 @@ UnknownModelException, ) from invokeai.app.util.suppress_output import SuppressOutput -from invokeai.backend.model_manager import BaseModelType, ModelFormat, ModelType -from invokeai.backend.model_manager.config import ( - AnyModelConfig, - MainCheckpointConfig, +from invokeai.backend.model_manager.configs.factory import AnyModelConfig +from invokeai.backend.model_manager.configs.main import ( + Main_Checkpoint_SD1_Config, + Main_Checkpoint_SD2_Config, + Main_Checkpoint_SDXL_Config, + Main_Checkpoint_SDXLRefiner_Config, ) from invokeai.backend.model_manager.load.model_cache.cache_stats import CacheStats from invokeai.backend.model_manager.metadata.fetch.huggingface import HuggingFaceMetadataFetch @@ -44,6 +46,7 @@ StarterModelBundle, StarterModelWithoutDependencies, ) +from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelFormat, ModelType model_manager_router = APIRouter(prefix="/v2/models", tags=["model_manager"]) @@ -741,9 +744,18 @@ async def convert_model( logger.error(str(e)) raise HTTPException(status_code=424, detail=str(e)) - if not isinstance(model_config, MainCheckpointConfig): - logger.error(f"The model with key {key} is not a main checkpoint model.") - raise HTTPException(400, f"The model with key {key} is not a main checkpoint model.") + if not isinstance( + model_config, + ( + Main_Checkpoint_SD1_Config, + Main_Checkpoint_SD2_Config, + Main_Checkpoint_SDXL_Config, + Main_Checkpoint_SDXLRefiner_Config, + ), + ): + msg = f"The model with key {key} is not a main SD 1/2/XL checkpoint model." 
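Review note on the isinstance check above: it now enumerates every SD main-checkpoint config class. If other endpoints need the same narrowing, a module-level tuple would keep the list in one place. A minimal sketch under that assumption; the tuple name is hypothetical and not part of this diff:

```python
from invokeai.backend.model_manager.configs.main import (
    Main_Checkpoint_SD1_Config,
    Main_Checkpoint_SD2_Config,
    Main_Checkpoint_SDXL_Config,
    Main_Checkpoint_SDXLRefiner_Config,
)

# Hypothetical alias: the main SD 1/2/XL checkpoint configs that the convert
# endpoint accepts. isinstance() takes a tuple of types, so the check becomes
# `isinstance(model_config, CONVERTIBLE_MAIN_CHECKPOINT_CONFIGS)`.
CONVERTIBLE_MAIN_CHECKPOINT_CONFIGS = (
    Main_Checkpoint_SD1_Config,
    Main_Checkpoint_SD2_Config,
    Main_Checkpoint_SDXL_Config,
    Main_Checkpoint_SDXLRefiner_Config,
)
```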
+ logger.error(msg) + raise HTTPException(400, msg) with TemporaryDirectory(dir=ApiDependencies.invoker.services.configuration.models_path) as tmpdir: convert_path = pathlib.Path(tmpdir) / pathlib.Path(model_config.path).stem diff --git a/invokeai/app/invocations/cogview4_denoise.py b/invokeai/app/invocations/cogview4_denoise.py index c0b962ba31d..070d8a34783 100644 --- a/invokeai/app/invocations/cogview4_denoise.py +++ b/invokeai/app/invocations/cogview4_denoise.py @@ -22,7 +22,7 @@ from invokeai.app.invocations.primitives import LatentsOutput from invokeai.app.services.shared.invocation_context import InvocationContext from invokeai.backend.flux.sampling_utils import clip_timestep_schedule_fractional -from invokeai.backend.model_manager.config import BaseModelType +from invokeai.backend.model_manager.taxonomy import BaseModelType from invokeai.backend.rectified_flow.rectified_flow_inpaint_extension import RectifiedFlowInpaintExtension from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState from invokeai.backend.stable_diffusion.diffusion.conditioning_data import CogView4ConditioningInfo diff --git a/invokeai/app/invocations/cogview4_model_loader.py b/invokeai/app/invocations/cogview4_model_loader.py index 9db4f3c0537..fbafcd345fd 100644 --- a/invokeai/app/invocations/cogview4_model_loader.py +++ b/invokeai/app/invocations/cogview4_model_loader.py @@ -13,8 +13,7 @@ VAEField, ) from invokeai.app.services.shared.invocation_context import InvocationContext -from invokeai.backend.model_manager.config import SubModelType -from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType +from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType, SubModelType @invocation_output("cogview4_model_loader_output") diff --git a/invokeai/app/invocations/create_gradient_mask.py b/invokeai/app/invocations/create_gradient_mask.py index b232fbbc932..8a7e7c52317 100644 --- a/invokeai/app/invocations/create_gradient_mask.py +++ b/invokeai/app/invocations/create_gradient_mask.py @@ -20,9 +20,7 @@ from invokeai.app.invocations.image_to_latents import ImageToLatentsInvocation from invokeai.app.invocations.model import UNetField, VAEField from invokeai.app.services.shared.invocation_context import InvocationContext -from invokeai.backend.model_manager import LoadedModel -from invokeai.backend.model_manager.config import MainConfigBase -from invokeai.backend.model_manager.taxonomy import ModelVariantType +from invokeai.backend.model_manager.taxonomy import FluxVariantType, ModelType, ModelVariantType from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor @@ -182,10 +180,11 @@ def invoke(self, context: InvocationContext) -> GradientMaskOutput: if self.unet is not None and self.vae is not None and self.image is not None: # all three fields must be present at the same time main_model_config = context.models.get_config(self.unet.unet.key) - assert isinstance(main_model_config, MainConfigBase) - if main_model_config.variant is ModelVariantType.Inpaint: + assert main_model_config.type is ModelType.Main + variant = getattr(main_model_config, "variant", None) + if variant is ModelVariantType.Inpaint or variant is FluxVariantType.DevFill: mask = dilated_mask_tensor - vae_info: LoadedModel = context.models.load(self.vae.vae) + vae_info = context.models.load(self.vae.vae) image = context.images.get_pil(self.image.image_name) image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB")) if image_tensor.dim() == 
3: diff --git a/invokeai/app/invocations/denoise_latents.py b/invokeai/app/invocations/denoise_latents.py index 37b385914cc..bb114263e23 100644 --- a/invokeai/app/invocations/denoise_latents.py +++ b/invokeai/app/invocations/denoise_latents.py @@ -39,7 +39,7 @@ from invokeai.app.services.shared.invocation_context import InvocationContext from invokeai.app.util.controlnet_utils import prepare_control_image from invokeai.backend.ip_adapter.ip_adapter import IPAdapter -from invokeai.backend.model_manager.config import AnyModelConfig +from invokeai.backend.model_manager.configs.factory import AnyModelConfig from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelVariantType from invokeai.backend.model_patcher import ModelPatcher from invokeai.backend.patches.layer_patcher import LayerPatcher diff --git a/invokeai/app/invocations/flux_denoise.py b/invokeai/app/invocations/flux_denoise.py index 35d095e2799..b6d0399108d 100644 --- a/invokeai/app/invocations/flux_denoise.py +++ b/invokeai/app/invocations/flux_denoise.py @@ -48,7 +48,7 @@ unpack, ) from invokeai.backend.flux.text_conditioning import FluxReduxConditioning, FluxTextConditioning -from invokeai.backend.model_manager.taxonomy import ModelFormat, ModelVariantType +from invokeai.backend.model_manager.taxonomy import BaseModelType, FluxVariantType, ModelFormat, ModelType from invokeai.backend.patches.layer_patcher import LayerPatcher from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_TRANSFORMER_PREFIX from invokeai.backend.patches.model_patch_raw import ModelPatchRaw @@ -232,7 +232,8 @@ def _run_diffusion( ) transformer_config = context.models.get_config(self.transformer.transformer) - is_schnell = "schnell" in getattr(transformer_config, "config_path", "") + assert transformer_config.base is BaseModelType.Flux and transformer_config.type is ModelType.Main + is_schnell = transformer_config.variant is FluxVariantType.Schnell # Calculate the timestep schedule. timesteps = get_schedule( @@ -277,7 +278,7 @@ def _run_diffusion( # Prepare the extra image conditioning tensor (img_cond) for either FLUX structural control or FLUX Fill. img_cond: torch.Tensor | None = None - is_flux_fill = transformer_config.variant == ModelVariantType.Inpaint # type: ignore + is_flux_fill = transformer_config.variant is FluxVariantType.DevFill if is_flux_fill: img_cond = self._prep_flux_fill_img_cond( context, device=TorchDevice.choose_torch_device(), dtype=inference_dtype diff --git a/invokeai/app/invocations/flux_ip_adapter.py b/invokeai/app/invocations/flux_ip_adapter.py index db5754ee2b0..4a1997c5122 100644 --- a/invokeai/app/invocations/flux_ip_adapter.py +++ b/invokeai/app/invocations/flux_ip_adapter.py @@ -16,10 +16,7 @@ from invokeai.app.invocations.primitives import ImageField from invokeai.app.invocations.util import validate_begin_end_step, validate_weights from invokeai.app.services.shared.invocation_context import InvocationContext -from invokeai.backend.model_manager.config import ( - IPAdapterCheckpointConfig, - IPAdapterInvokeAIConfig, -) +from invokeai.backend.model_manager.configs.ip_adapter import IPAdapter_Checkpoint_FLUX_Config from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType @@ -68,7 +65,7 @@ def validate_begin_end_step_percent(self) -> Self: def invoke(self, context: InvocationContext) -> IPAdapterOutput: # Lookup the CLIP Vision encoder that is intended to be used with the IP-Adapter model. 
ip_adapter_info = context.models.get_config(self.ip_adapter_model.key) - assert isinstance(ip_adapter_info, (IPAdapterInvokeAIConfig, IPAdapterCheckpointConfig)) + assert isinstance(ip_adapter_info, IPAdapter_Checkpoint_FLUX_Config) # Note: There is a IPAdapterInvokeAIConfig.image_encoder_model_id field, but it isn't trustworthy. image_encoder_starter_model = CLIP_VISION_MODEL_MAP[self.clip_vision_model] diff --git a/invokeai/app/invocations/flux_model_loader.py b/invokeai/app/invocations/flux_model_loader.py index e5a1966c659..eaac82bafc8 100644 --- a/invokeai/app/invocations/flux_model_loader.py +++ b/invokeai/app/invocations/flux_model_loader.py @@ -13,10 +13,8 @@ preprocess_t5_encoder_model_identifier, preprocess_t5_tokenizer_model_identifier, ) -from invokeai.backend.flux.util import max_seq_lengths -from invokeai.backend.model_manager.config import ( - CheckpointConfigBase, -) +from invokeai.backend.flux.util import get_flux_max_seq_length +from invokeai.backend.model_manager.configs.base import Checkpoint_Config_Base from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType, SubModelType @@ -87,12 +85,12 @@ def invoke(self, context: InvocationContext) -> FluxModelLoaderOutput: t5_encoder = preprocess_t5_encoder_model_identifier(self.t5_encoder_model) transformer_config = context.models.get_config(transformer) - assert isinstance(transformer_config, CheckpointConfigBase) + assert isinstance(transformer_config, Checkpoint_Config_Base) return FluxModelLoaderOutput( transformer=TransformerField(transformer=transformer, loras=[]), clip=CLIPField(tokenizer=tokenizer, text_encoder=clip_encoder, loras=[], skipped_layers=0), t5_encoder=T5EncoderField(tokenizer=tokenizer2, text_encoder=t5_encoder, loras=[]), vae=VAEField(vae=vae), - max_seq_len=max_seq_lengths[transformer_config.config_path], + max_seq_len=get_flux_max_seq_length(transformer_config.variant), ) diff --git a/invokeai/app/invocations/flux_redux.py b/invokeai/app/invocations/flux_redux.py index 3e34497b105..403d78b0786 100644 --- a/invokeai/app/invocations/flux_redux.py +++ b/invokeai/app/invocations/flux_redux.py @@ -24,9 +24,9 @@ from invokeai.app.services.model_records.model_records_base import ModelRecordChanges from invokeai.app.services.shared.invocation_context import InvocationContext from invokeai.backend.flux.redux.flux_redux_model import FluxReduxModel -from invokeai.backend.model_manager import BaseModelType, ModelType -from invokeai.backend.model_manager.config import AnyModelConfig +from invokeai.backend.model_manager.configs.factory import AnyModelConfig from invokeai.backend.model_manager.starter_models import siglip +from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType from invokeai.backend.sig_lip.sig_lip_pipeline import SigLipPipeline from invokeai.backend.util.devices import TorchDevice diff --git a/invokeai/app/invocations/flux_text_encoder.py b/invokeai/app/invocations/flux_text_encoder.py index 77b6187840c..c395a0bf22d 100644 --- a/invokeai/app/invocations/flux_text_encoder.py +++ b/invokeai/app/invocations/flux_text_encoder.py @@ -17,7 +17,7 @@ from invokeai.app.invocations.primitives import FluxConditioningOutput from invokeai.app.services.shared.invocation_context import InvocationContext from invokeai.backend.flux.modules.conditioner import HFEncoder -from invokeai.backend.model_manager import ModelFormat +from invokeai.backend.model_manager.taxonomy import ModelFormat from invokeai.backend.patches.layer_patcher import LayerPatcher from 
invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_CLIP_PREFIX, FLUX_LORA_T5_PREFIX from invokeai.backend.patches.model_patch_raw import ModelPatchRaw diff --git a/invokeai/app/invocations/flux_vae_encode.py b/invokeai/app/invocations/flux_vae_encode.py index 2932517edcf..4ec0365c2cb 100644 --- a/invokeai/app/invocations/flux_vae_encode.py +++ b/invokeai/app/invocations/flux_vae_encode.py @@ -12,7 +12,7 @@ from invokeai.app.invocations.primitives import LatentsOutput from invokeai.app.services.shared.invocation_context import InvocationContext from invokeai.backend.flux.modules.autoencoder import AutoEncoder -from invokeai.backend.model_manager import LoadedModel +from invokeai.backend.model_manager.load.load_base import LoadedModel from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor from invokeai.backend.util.devices import TorchDevice from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_flux diff --git a/invokeai/app/invocations/image_to_latents.py b/invokeai/app/invocations/image_to_latents.py index 552f5edb1b2..fde70a34fde 100644 --- a/invokeai/app/invocations/image_to_latents.py +++ b/invokeai/app/invocations/image_to_latents.py @@ -23,7 +23,7 @@ from invokeai.app.invocations.model import VAEField from invokeai.app.invocations.primitives import LatentsOutput from invokeai.app.services.shared.invocation_context import InvocationContext -from invokeai.backend.model_manager import LoadedModel +from invokeai.backend.model_manager.load.load_base import LoadedModel from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor from invokeai.backend.stable_diffusion.vae_tiling import patch_vae_tiling_params from invokeai.backend.util.devices import TorchDevice diff --git a/invokeai/app/invocations/ip_adapter.py b/invokeai/app/invocations/ip_adapter.py index 35a98ff6ba0..2b2931e78f3 100644 --- a/invokeai/app/invocations/ip_adapter.py +++ b/invokeai/app/invocations/ip_adapter.py @@ -11,10 +11,10 @@ from invokeai.app.invocations.util import validate_begin_end_step, validate_weights from invokeai.app.services.model_records.model_records_base import ModelRecordChanges from invokeai.app.services.shared.invocation_context import InvocationContext -from invokeai.backend.model_manager.config import ( - AnyModelConfig, - IPAdapterCheckpointConfig, - IPAdapterInvokeAIConfig, +from invokeai.backend.model_manager.configs.factory import AnyModelConfig +from invokeai.backend.model_manager.configs.ip_adapter import ( + IPAdapter_Checkpoint_Config_Base, + IPAdapter_InvokeAI_Config_Base, ) from invokeai.backend.model_manager.starter_models import ( StarterModel, @@ -123,9 +123,9 @@ def validate_begin_end_step_percent(self) -> Self: def invoke(self, context: InvocationContext) -> IPAdapterOutput: # Lookup the CLIP Vision encoder that is intended to be used with the IP-Adapter model. 
ip_adapter_info = context.models.get_config(self.ip_adapter_model.key) - assert isinstance(ip_adapter_info, (IPAdapterInvokeAIConfig, IPAdapterCheckpointConfig)) + assert isinstance(ip_adapter_info, (IPAdapter_InvokeAI_Config_Base, IPAdapter_Checkpoint_Config_Base)) - if isinstance(ip_adapter_info, IPAdapterInvokeAIConfig): + if isinstance(ip_adapter_info, IPAdapter_InvokeAI_Config_Base): image_encoder_model_id = ip_adapter_info.image_encoder_model_id image_encoder_model_name = image_encoder_model_id.split("/")[-1].strip() else: diff --git a/invokeai/app/invocations/model.py b/invokeai/app/invocations/model.py index 2d338c677d2..753ae77c559 100644 --- a/invokeai/app/invocations/model.py +++ b/invokeai/app/invocations/model.py @@ -12,9 +12,7 @@ from invokeai.app.invocations.fields import FieldDescriptions, ImageField, Input, InputField, OutputField from invokeai.app.services.shared.invocation_context import InvocationContext from invokeai.app.shared.models import FreeUConfig -from invokeai.backend.model_manager.config import ( - AnyModelConfig, -) +from invokeai.backend.model_manager.configs.factory import AnyModelConfig from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType, SubModelType @@ -24,8 +22,9 @@ class ModelIdentifierField(BaseModel): name: str = Field(description="The model's name") base: BaseModelType = Field(description="The model's base model type") type: ModelType = Field(description="The model's type") - submodel_type: Optional[SubModelType] = Field( - description="The submodel to load, if this is a main model", default=None + submodel_type: SubModelType | None = Field( + description="The submodel to load, if this is a main model", + default=None, ) @classmethod diff --git a/invokeai/app/invocations/sd3_denoise.py b/invokeai/app/invocations/sd3_denoise.py index f43f26ae0ed..b9d69369b76 100644 --- a/invokeai/app/invocations/sd3_denoise.py +++ b/invokeai/app/invocations/sd3_denoise.py @@ -23,7 +23,7 @@ from invokeai.app.invocations.sd3_text_encoder import SD3_T5_MAX_SEQ_LEN from invokeai.app.services.shared.invocation_context import InvocationContext from invokeai.backend.flux.sampling_utils import clip_timestep_schedule_fractional -from invokeai.backend.model_manager import BaseModelType +from invokeai.backend.model_manager.taxonomy import BaseModelType from invokeai.backend.rectified_flow.rectified_flow_inpaint_extension import RectifiedFlowInpaintExtension from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState from invokeai.backend.stable_diffusion.diffusion.conditioning_data import SD3ConditioningInfo diff --git a/invokeai/app/services/events/events_base.py b/invokeai/app/services/events/events_base.py index fc0f0bb2c69..c70ef3fa16e 100644 --- a/invokeai/app/services/events/events_base.py +++ b/invokeai/app/services/events/events_base.py @@ -44,8 +44,8 @@ SessionQueueItem, SessionQueueStatus, ) - from invokeai.backend.model_manager import SubModelType - from invokeai.backend.model_manager.config import AnyModelConfig + from invokeai.backend.model_manager.configs.factory import AnyModelConfig + from invokeai.backend.model_manager.taxonomy import SubModelType class EventServiceBase: diff --git a/invokeai/app/services/events/events_common.py b/invokeai/app/services/events/events_common.py index 8fbb08015aa..2f995293984 100644 --- a/invokeai/app/services/events/events_common.py +++ b/invokeai/app/services/events/events_common.py @@ -16,8 +16,8 @@ ) from invokeai.app.services.shared.graph import AnyInvocation, 
AnyInvocationOutput from invokeai.app.util.misc import get_timestamp -from invokeai.backend.model_manager import SubModelType -from invokeai.backend.model_manager.config import AnyModelConfig +from invokeai.backend.model_manager.configs.factory import AnyModelConfig +from invokeai.backend.model_manager.taxonomy import SubModelType if TYPE_CHECKING: from invokeai.app.services.download.download_base import DownloadJob diff --git a/invokeai/app/services/model_install/model_install_common.py b/invokeai/app/services/model_install/model_install_common.py index fea75d73752..67832466f3a 100644 --- a/invokeai/app/services/model_install/model_install_common.py +++ b/invokeai/app/services/model_install/model_install_common.py @@ -10,7 +10,7 @@ from invokeai.app.services.download import DownloadJob, MultiFileDownloadJob from invokeai.app.services.model_records import ModelRecordChanges -from invokeai.backend.model_manager.config import AnyModelConfig +from invokeai.backend.model_manager.configs.factory import AnyModelConfig from invokeai.backend.model_manager.metadata import AnyModelRepoMetadata from invokeai.backend.model_manager.taxonomy import ModelRepoVariant, ModelSourceType diff --git a/invokeai/app/services/model_install/model_install_default.py b/invokeai/app/services/model_install/model_install_default.py index 454697ea5a1..53bb5cc12df 100644 --- a/invokeai/app/services/model_install/model_install_default.py +++ b/invokeai/app/services/model_install/model_install_default.py @@ -5,6 +5,7 @@ import re import threading import time +from copy import deepcopy from pathlib import Path from queue import Empty, Queue from shutil import move, rmtree @@ -34,13 +35,11 @@ ) from invokeai.app.services.model_records import DuplicateModelException, ModelRecordServiceBase from invokeai.app.services.model_records.model_records_base import ModelRecordChanges -from invokeai.backend.model_manager.config import ( +from invokeai.backend.model_manager.configs.base import Checkpoint_Config_Base +from invokeai.backend.model_manager.configs.factory import ( AnyModelConfig, - CheckpointConfigBase, - InvalidModelConfigException, - ModelConfigBase, + ModelConfigFactory, ) -from invokeai.backend.model_manager.legacy_probe import ModelProbe from invokeai.backend.model_manager.metadata import ( AnyModelRepoMetadata, HuggingFaceMetadataFetch, @@ -370,6 +369,8 @@ def unconditionally_delete(self, key: str) -> None: # noqa D102 model_path = self.app_config.models_path / model.path if model_path.is_file() or model_path.is_symlink(): model_path.unlink() + assert model_path.parent != self.app_config.models_path + os.rmdir(model_path.parent) elif model_path.is_dir(): rmtree(model_path) self.unregister(key) @@ -530,7 +531,7 @@ def _set_error(self, install_job: ModelInstallJob, excp: Exception) -> None: x.content_type is not None and "text/html" in x.content_type for x in multifile_download_job.download_parts ): install_job.set_error( - InvalidModelConfigException( + ValueError( f"At least one file in {install_job.local_path} is an HTML page, not a model. This can happen when an access token is required to download." ) ) @@ -598,18 +599,11 @@ def _probe(self, model_path: Path, config: Optional[ModelRecordChanges] = None): hash_algo = self._app_config.hashing_algorithm fields = config.model_dump() - # WARNING! - # The legacy probe relies on the implicit order of tests to determine model classification. - # This can lead to regressions between the legacy and new probes. 
- # Do NOT change the order of `probe` and `classify` without implementing one of the following fixes: - # Short-term fix: `classify` tests `matches` in the same order as the legacy probe. - # Long-term fix: Improve `matches` to be more specific so that only one config matches - # any given model - eliminating ambiguity and removing reliance on order. - # After implementing either of these fixes, remove @pytest.mark.xfail from `test_regression_against_model_probe` - try: - return ModelProbe.probe(model_path=model_path, fields=fields, hash_algo=hash_algo) # type: ignore - except InvalidModelConfigException: - return ModelConfigBase.classify(model_path, hash_algo, **fields) + return ModelConfigFactory.from_model_on_disk( + mod=model_path, + override_fields=deepcopy(fields), + hash_algo=hash_algo, + ) def _register( self, model_path: Path, config: Optional[ModelRecordChanges] = None, info: Optional[AnyModelConfig] = None @@ -630,7 +624,7 @@ def _register( info.path = model_path.as_posix() - if isinstance(info, CheckpointConfigBase): + if isinstance(info, Checkpoint_Config_Base) and info.config_path is not None: # Checkpoints have a config file needed for conversion. Same handling as the model weights - if it's in the # invoke-managed legacy config dir, we use a relative path. legacy_config_path = self.app_config.legacy_conf_path / info.config_path diff --git a/invokeai/app/services/model_load/model_load_base.py b/invokeai/app/services/model_load/model_load_base.py index 8aae80e29da..87a405b4ea4 100644 --- a/invokeai/app/services/model_load/model_load_base.py +++ b/invokeai/app/services/model_load/model_load_base.py @@ -5,7 +5,7 @@ from pathlib import Path from typing import Callable, Optional -from invokeai.backend.model_manager.config import AnyModelConfig +from invokeai.backend.model_manager.configs.factory import AnyModelConfig from invokeai.backend.model_manager.load import LoadedModel, LoadedModelWithoutConfig from invokeai.backend.model_manager.load.model_cache.model_cache import ModelCache from invokeai.backend.model_manager.taxonomy import AnyModel, SubModelType diff --git a/invokeai/app/services/model_load/model_load_default.py b/invokeai/app/services/model_load/model_load_default.py index ad4ad97a02c..2e2d2ae219d 100644 --- a/invokeai/app/services/model_load/model_load_default.py +++ b/invokeai/app/services/model_load/model_load_default.py @@ -11,7 +11,7 @@ from invokeai.app.services.config import InvokeAIAppConfig from invokeai.app.services.invoker import Invoker from invokeai.app.services.model_load.model_load_base import ModelLoadServiceBase -from invokeai.backend.model_manager.config import AnyModelConfig +from invokeai.backend.model_manager.configs.factory import AnyModelConfig from invokeai.backend.model_manager.load import ( LoadedModel, LoadedModelWithoutConfig, diff --git a/invokeai/app/services/model_manager/__init__.py b/invokeai/app/services/model_manager/__init__.py index aad67ff3527..e703d4f1ffc 100644 --- a/invokeai/app/services/model_manager/__init__.py +++ b/invokeai/app/services/model_manager/__init__.py @@ -1,12 +1,10 @@ """Initialization file for model manager service.""" from invokeai.app.services.model_manager.model_manager_default import ModelManagerService, ModelManagerServiceBase -from invokeai.backend.model_manager import AnyModelConfig from invokeai.backend.model_manager.load import LoadedModel __all__ = [ "ModelManagerServiceBase", "ModelManagerService", - "AnyModelConfig", "LoadedModel", ] diff --git 
a/invokeai/app/services/model_records/model_records_base.py b/invokeai/app/services/model_records/model_records_base.py index 740d548a4a3..2d34832dbe0 100644 --- a/invokeai/app/services/model_records/model_records_base.py +++ b/invokeai/app/services/model_records/model_records_base.py @@ -12,15 +12,14 @@ from invokeai.app.services.shared.pagination import PaginatedResults from invokeai.app.util.model_exclude_null import BaseModelExcludeNull -from invokeai.backend.model_manager.config import ( - AnyModelConfig, - ControlAdapterDefaultSettings, - LoraModelDefaultSettings, - MainModelDefaultSettings, -) +from invokeai.backend.model_manager.configs.controlnet import ControlAdapterDefaultSettings +from invokeai.backend.model_manager.configs.factory import AnyModelConfig +from invokeai.backend.model_manager.configs.lora import LoraModelDefaultSettings +from invokeai.backend.model_manager.configs.main import MainModelDefaultSettings from invokeai.backend.model_manager.taxonomy import ( BaseModelType, ClipVariantType, + FluxVariantType, ModelFormat, ModelSourceType, ModelType, @@ -90,7 +89,9 @@ class ModelRecordChanges(BaseModelExcludeNull): # Checkpoint-specific changes # TODO(MM2): Should we expose these? Feels footgun-y... - variant: Optional[ModelVariantType | ClipVariantType] = Field(description="The variant of the model.", default=None) + variant: Optional[ModelVariantType | ClipVariantType | FluxVariantType] = Field( + description="The variant of the model.", default=None + ) prediction_type: Optional[SchedulerPredictionType] = Field( description="The prediction type of the model.", default=None ) diff --git a/invokeai/app/services/model_records/model_records_sql.py b/invokeai/app/services/model_records/model_records_sql.py index e3b24a6e626..6d9a33ba4a6 100644 --- a/invokeai/app/services/model_records/model_records_sql.py +++ b/invokeai/app/services/model_records/model_records_sql.py @@ -58,10 +58,7 @@ ) from invokeai.app.services.shared.pagination import PaginatedResults from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase -from invokeai.backend.model_manager.config import ( - AnyModelConfig, - ModelConfigFactory, -) +from invokeai.backend.model_manager.configs.factory import AnyModelConfig, ModelConfigFactory from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelFormat, ModelType @@ -141,10 +138,25 @@ def update_model(self, key: str, changes: ModelRecordChanges) -> AnyModelConfig: with self._db.transaction() as cursor: record = self.get_model(key) - # Model configs use pydantic's `validate_assignment`, so each change is validated by pydantic. + # The changes may mean the model config class changes. So we need to: + # + # 1. convert the existing record to a dict + # 2. apply the changes to the dict + # 3. create a new model config from the updated dict + # + # This way we ensure that the update does not inadvertently create an invalid model config. + + # 1. convert the existing record to a dict + record_as_dict = record.model_dump() + + # 2. apply the changes to the dict for field_name in changes.model_fields_set: - setattr(record, field_name, getattr(changes, field_name)) + record_as_dict[field_name] = getattr(changes, field_name) + # 3. create a new model config from the updated dict + record = ModelConfigFactory.from_dict(record_as_dict) + + # If we get this far, the updated model config is valid, so we can save it to the database. 
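Aside on the dump, mutate, re-validate pattern introduced in `update_model` above: the old `setattr` loop validated each assignment against the existing config class, so an update could never move a record to a different concrete class in the discriminated union. Round-tripping through a dict and re-validating can. A self-contained toy illustration of the idea (toy classes, not InvokeAI's real configs):

```python
from typing import Literal, Union

from pydantic import BaseModel, TypeAdapter

class DevConfig(BaseModel):
    variant: Literal["dev"]

class SchnellConfig(BaseModel):
    variant: Literal["schnell"]

AnyConfig = TypeAdapter(Union[DevConfig, SchnellConfig])

record = DevConfig(variant="dev")
as_dict = record.model_dump()                 # 1. existing config -> plain dict
as_dict["variant"] = "schnell"                # 2. apply the changes to the dict
updated = AnyConfig.validate_python(as_dict)  # 3. re-validate against the whole union
assert isinstance(updated, SchnellConfig)     # the record moved to a new class
```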
json_serialized = record.model_dump_json() cursor.execute( @@ -172,7 +184,7 @@ def get_model(self, key: str) -> AnyModelConfig: with self._db.transaction() as cursor: cursor.execute( """--sql - SELECT config, strftime('%s',updated_at) FROM models + SELECT config FROM models WHERE id=?; """, (key,), @@ -180,14 +192,14 @@ def get_model(self, key: str) -> AnyModelConfig: rows = cursor.fetchone() if not rows: raise UnknownModelException("model not found") - model = ModelConfigFactory.make_config(json.loads(rows[0]), timestamp=rows[1]) + model = ModelConfigFactory.from_dict(json.loads(rows[0])) return model def get_model_by_hash(self, hash: str) -> AnyModelConfig: with self._db.transaction() as cursor: cursor.execute( """--sql - SELECT config, strftime('%s',updated_at) FROM models + SELECT config FROM models WHERE hash=?; """, (hash,), @@ -195,7 +207,7 @@ def get_model_by_hash(self, hash: str) -> AnyModelConfig: rows = cursor.fetchone() if not rows: raise UnknownModelException("model not found") - model = ModelConfigFactory.make_config(json.loads(rows[0]), timestamp=rows[1]) + model = ModelConfigFactory.from_dict(json.loads(rows[0])) return model def exists(self, key: str) -> bool: @@ -263,7 +275,7 @@ def search_by_attr( cursor.execute( f"""--sql - SELECT config, strftime('%s',updated_at) + SELECT config FROM models {where} ORDER BY {ordering[order_by]} -- using ? to bind doesn't work here for some reason; @@ -276,15 +288,20 @@ def search_by_attr( results: list[AnyModelConfig] = [] for row in result: try: - model_config = ModelConfigFactory.make_config(json.loads(row[0]), timestamp=row[1]) - except pydantic.ValidationError: + model_config = ModelConfigFactory.from_dict(json.loads(row[0])) + except pydantic.ValidationError as e: # We catch this error so that the app can still run if there are invalid model configs in the database. # One reason that an invalid model config might be in the database is if someone had to rollback from a # newer version of the app that added a new model type. row_data = f"{row[0][:64]}..." if len(row[0]) > 64 else row[0] + try: + name = json.loads(row[0]).get("name", "") + except Exception: + name = "" self._logger.warning( - f"Found an invalid model config in the database. Ignoring this model. ({row_data})" + f"Skipping invalid model config in the database with name {name}. Ignoring this model. 
({row_data})" ) + self._logger.warning(f"Validation error: {e}") else: results.append(model_config) @@ -295,12 +312,12 @@ def search_by_path(self, path: Union[str, Path]) -> List[AnyModelConfig]: with self._db.transaction() as cursor: cursor.execute( """--sql - SELECT config, strftime('%s',updated_at) FROM models + SELECT config FROM models WHERE path=?; """, (str(path),), ) - results = [ModelConfigFactory.make_config(json.loads(x[0]), timestamp=x[1]) for x in cursor.fetchall()] + results = [ModelConfigFactory.from_dict(json.loads(x[0])) for x in cursor.fetchall()] return results def search_by_hash(self, hash: str) -> List[AnyModelConfig]: with self._db.transaction() as cursor: cursor.execute( """--sql - SELECT config, strftime('%s',updated_at) FROM models + SELECT config FROM models WHERE hash=?; """, (hash,), ) - results = [ModelConfigFactory.make_config(json.loads(x[0]), timestamp=x[1]) for x in cursor.fetchall()] + results = [ModelConfigFactory.from_dict(json.loads(x[0])) for x in cursor.fetchall()] return results def list_models( diff --git a/invokeai/app/services/model_relationships/model_relationships_default.py b/invokeai/app/services/model_relationships/model_relationships_default.py index 67fa6c0069d..e4da482ff27 100644 --- a/invokeai/app/services/model_relationships/model_relationships_default.py +++ b/invokeai/app/services/model_relationships/model_relationships_default.py @@ -1,6 +1,6 @@ from invokeai.app.services.invoker import Invoker from invokeai.app.services.model_relationships.model_relationships_base import ModelRelationshipsServiceABC -from invokeai.backend.model_manager.config import AnyModelConfig +from invokeai.backend.model_manager.configs.factory import AnyModelConfig class ModelRelationshipsService(ModelRelationshipsServiceABC): diff --git a/invokeai/app/services/shared/invocation_context.py b/invokeai/app/services/shared/invocation_context.py index 743b6208ead..97291230e04 100644 --- a/invokeai/app/services/shared/invocation_context.py +++ b/invokeai/app/services/shared/invocation_context.py @@ -19,10 +19,8 @@ from invokeai.app.services.session_processor.session_processor_common import ProgressImage from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection from invokeai.app.util.step_callback import diffusion_step_callback -from invokeai.backend.model_manager.config import ( - AnyModelConfig, - ModelConfigBase, -) +from invokeai.backend.model_manager.configs.base import Config_Base +from invokeai.backend.model_manager.configs.factory import AnyModelConfig from invokeai.backend.model_manager.load.load_base import LoadedModel, LoadedModelWithoutConfig from invokeai.backend.model_manager.taxonomy import AnyModel, BaseModelType, ModelFormat, ModelType, SubModelType from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState @@ -558,7 +556,7 @@ def get_absolute_path(self, config_or_path: AnyModelConfig | Path | str) -> Path The absolute path to the model.
""" - model_path = Path(config_or_path.path) if isinstance(config_or_path, ModelConfigBase) else Path(config_or_path) + model_path = Path(config_or_path.path) if isinstance(config_or_path, Config_Base) else Path(config_or_path) if model_path.is_absolute(): return model_path.resolve() diff --git a/invokeai/app/services/shared/sqlite_migrator/migrations/migration_22.py b/invokeai/app/services/shared/sqlite_migrator/migrations/migration_22.py index c79b58bf2ad..2cd2101341e 100644 --- a/invokeai/app/services/shared/sqlite_migrator/migrations/migration_22.py +++ b/invokeai/app/services/shared/sqlite_migrator/migrations/migration_22.py @@ -2,13 +2,13 @@ import sqlite3 from logging import Logger from pathlib import Path -from typing import NamedTuple +from typing import Any, NamedTuple from pydantic import ValidationError from invokeai.app.services.config import InvokeAIAppConfig from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration -from invokeai.backend.model_manager.config import AnyModelConfig, AnyModelConfigValidator +from invokeai.backend.model_manager.configs.factory import AnyModelConfigValidator class NormalizeResult(NamedTuple): @@ -29,8 +29,9 @@ def __call__(self, cursor: sqlite3.Cursor) -> None: for model_id, config_json in rows: try: + migrated_config_dict = self._migrate_config(config_json) # Get the model config as a pydantic object - config = self._load_model_config(config_json) + config = AnyModelConfigValidator.validate_python(migrated_config_dict) except ValidationError: # This could happen if the config schema changed in a way that makes old configs invalid. Unlikely # for users, more likely for devs testing out migration paths. @@ -76,6 +77,26 @@ def __call__(self, cursor: sqlite3.Cursor) -> None: self._prune_empty_directories() + def _migrate_config(self, config_json: str) -> dict[str, Any]: + config_dict = json.loads(config_json) + + # TODO: migrate fields, review changes to ensure we hit all cases for v6.7.0 to v6.8.0 upgrade. + + # Prior to v6.8.0, we used an awkward combination of `config_path` and `variant` to distinguish between FLUX + # variants. + # + # `config_path` was set to one of: + # - flux-dev + # - flux-dev-fill + # - flux-schnell + # + # `variant` was set to ModelVariantType.Inpaint for FLUX Fill models and ModelVariantType.Normal for all other FLUX + # models. + # + # We now use the `variant` field to directly represent the FLUX variant type, and `config_path` is no longer used. + + return config_dict + def _normalize_model_storage(self, key: str, path_value: str) -> NormalizeResult: models_dir = self._models_dir stored_path = Path(path_value) @@ -216,11 +237,6 @@ def _prune_empty_directories(self) -> None: self._logger.info("Pruned %d empty directories under %s", len(removed_dirs), self._models_dir) - def _load_model_config(self, config_json: str) -> AnyModelConfig: - # The typing of the validator says it returns Unknown, but it's really a AnyModelConfig. This utility function - # just makes that clear. - return AnyModelConfigValidator.validate_json(config_json) - def build_migration_22(app_config: InvokeAIAppConfig, logger: Logger) -> Migration: """Builds the migration object for migrating from version 21 to version 22.
diff --git a/invokeai/app/util/custom_openapi.py b/invokeai/app/util/custom_openapi.py index d6b8f3786f1..d400e0ff11b 100644 --- a/invokeai/app/util/custom_openapi.py +++ b/invokeai/app/util/custom_openapi.py @@ -12,6 +12,7 @@ from invokeai.app.invocations.model import ModelIdentifierField from invokeai.app.services.events.events_common import EventBase from invokeai.app.services.session_processor.session_processor_common import ProgressImage +from invokeai.backend.model_manager.configs.factory import AnyModelConfigValidator from invokeai.backend.util.logging import InvokeAILogger logger = InvokeAILogger.get_logger() @@ -115,6 +116,13 @@ def openapi() -> dict[str, Any]: # additional_schemas[1] is a dict of $defs that we need to add to the top level of the schema move_defs_to_top_level(openapi_schema, additional_schemas[1]) + any_model_config_schema = AnyModelConfigValidator.json_schema( + mode="serialization", + ref_template="#/components/schemas/{model}", + ) + move_defs_to_top_level(openapi_schema, any_model_config_schema) + openapi_schema["components"]["schemas"]["AnyModelConfig"] = any_model_config_schema + if post_transform is not None: openapi_schema = post_transform(openapi_schema) diff --git a/invokeai/backend/flux/controlnet/state_dict_utils.py b/invokeai/backend/flux/controlnet/state_dict_utils.py index aa44e6c10f0..87eae5a96bc 100644 --- a/invokeai/backend/flux/controlnet/state_dict_utils.py +++ b/invokeai/backend/flux/controlnet/state_dict_utils.py @@ -5,7 +5,7 @@ from invokeai.backend.flux.model import FluxParams -def is_state_dict_xlabs_controlnet(sd: Dict[str, Any]) -> bool: +def is_state_dict_xlabs_controlnet(sd: dict[str | int, Any]) -> bool: """Is the state dict for an XLabs ControlNet model? This is intended to be a reasonably high-precision detector, but it is not guaranteed to have perfect precision. @@ -25,7 +25,7 @@ def is_state_dict_xlabs_controlnet(sd: Dict[str, Any]) -> bool: return False -def is_state_dict_instantx_controlnet(sd: Dict[str, Any]) -> bool: +def is_state_dict_instantx_controlnet(sd: dict[str | int, Any]) -> bool: """Is the state dict for an InstantX ControlNet model? This is intended to be a reasonably high-precision detector, but it is not guaranteed to have perfect precision. 
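The detectors above (and the similar ones in the next few files) now accept `dict[str | int, Any]` rather than `Dict[str, Any]`; some checkpoints really do carry integer keys, as the `isinstance(key, int)` guard in the LoRA matcher later in this diff shows. A quick usage sketch, with an illustrative file path and assuming the `safetensors` package is available:

```python
from safetensors.torch import load_file

from invokeai.backend.flux.controlnet.state_dict_utils import (
    is_state_dict_instantx_controlnet,
    is_state_dict_xlabs_controlnet,
)

sd = load_file("controlnet.safetensors")  # illustrative path
if is_state_dict_xlabs_controlnet(sd):
    print("XLabs FLUX ControlNet")
elif is_state_dict_instantx_controlnet(sd):
    print("InstantX FLUX ControlNet")
else:
    print("Not a recognized FLUX ControlNet")
```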
diff --git a/invokeai/backend/flux/flux_state_dict_utils.py b/invokeai/backend/flux/flux_state_dict_utils.py index 8ffab54c688..c306c88f965 100644 --- a/invokeai/backend/flux/flux_state_dict_utils.py +++ b/invokeai/backend/flux/flux_state_dict_utils.py @@ -1,10 +1,7 @@ -from typing import TYPE_CHECKING +from typing import Any -if TYPE_CHECKING: - from invokeai.backend.model_manager.legacy_probe import CkptType - -def get_flux_in_channels_from_state_dict(state_dict: "CkptType") -> int | None: +def get_flux_in_channels_from_state_dict(state_dict: dict[str | int, Any]) -> int | None: """Gets the in channels from the state dict.""" # "Standard" FLUX models use "img_in.weight", but some community fine tunes use diff --git a/invokeai/backend/flux/ip_adapter/state_dict_utils.py b/invokeai/backend/flux/ip_adapter/state_dict_utils.py index 90f11ff642b..24ac53550f9 100644 --- a/invokeai/backend/flux/ip_adapter/state_dict_utils.py +++ b/invokeai/backend/flux/ip_adapter/state_dict_utils.py @@ -1,11 +1,11 @@ -from typing import Any, Dict +from typing import Any import torch from invokeai.backend.flux.ip_adapter.xlabs_ip_adapter_flux import XlabsIpAdapterParams -def is_state_dict_xlabs_ip_adapter(sd: Dict[str, Any]) -> bool: +def is_state_dict_xlabs_ip_adapter(sd: dict[str | int, Any]) -> bool: """Is the state dict for an XLabs FLUX IP-Adapter model? This is intended to be a reasonably high-precision detector, but it is not guaranteed to have perfect precision. @@ -27,7 +27,7 @@ def is_state_dict_xlabs_ip_adapter(sd: Dict[str, Any]) -> bool: return False -def infer_xlabs_ip_adapter_params_from_state_dict(state_dict: dict[str, torch.Tensor]) -> XlabsIpAdapterParams: +def infer_xlabs_ip_adapter_params_from_state_dict(state_dict: dict[str | int, torch.Tensor]) -> XlabsIpAdapterParams: num_double_blocks = 0 context_dim = 0 hidden_dim = 0 diff --git a/invokeai/backend/flux/redux/flux_redux_state_dict_utils.py b/invokeai/backend/flux/redux/flux_redux_state_dict_utils.py index a5a13b402d3..83e96d38451 100644 --- a/invokeai/backend/flux/redux/flux_redux_state_dict_utils.py +++ b/invokeai/backend/flux/redux/flux_redux_state_dict_utils.py @@ -1,7 +1,7 @@ -from typing import Any, Dict +from typing import Any -def is_state_dict_likely_flux_redux(state_dict: Dict[str, Any]) -> bool: +def is_state_dict_likely_flux_redux(state_dict: dict[str | int, Any]) -> bool: """Checks if the provided state dict is likely a FLUX Redux model.""" expected_keys = {"redux_down.bias", "redux_down.weight", "redux_up.bias", "redux_up.weight"} diff --git a/invokeai/backend/flux/util.py b/invokeai/backend/flux/util.py index 2a5261cb5c6..2cf52b6ec11 100644 --- a/invokeai/backend/flux/util.py +++ b/invokeai/backend/flux/util.py @@ -1,10 +1,11 @@ # Initially pulled from https://github.com/black-forest-labs/flux from dataclasses import dataclass -from typing import Dict, Literal +from typing import Literal from invokeai.backend.flux.model import FluxParams from invokeai.backend.flux.modules.autoencoder import AutoEncoderParams +from invokeai.backend.model_manager.taxonomy import AnyVariant, FluxVariantType @dataclass @@ -41,30 +42,39 @@ class ModelSpec: ] -max_seq_lengths: Dict[str, Literal[256, 512]] = { - "flux-dev": 512, - "flux-dev-fill": 512, - "flux-schnell": 256, +_flux_max_seq_lengths: dict[AnyVariant, Literal[256, 512]] = { + FluxVariantType.Dev: 512, + FluxVariantType.DevFill: 512, + FluxVariantType.Schnell: 256, } -ae_params = { - "flux": AutoEncoderParams( - resolution=256, - in_channels=3, - ch=128, - out_ch=3, - ch_mult=[1, 2, 4, 
4], - num_res_blocks=2, - z_channels=16, - scale_factor=0.3611, - shift_factor=0.1159, - ) -} +def get_flux_max_seq_length(variant: AnyVariant): + try: + return _flux_max_seq_lengths[variant] + except KeyError: + raise ValueError(f"Unknown variant for FLUX max seq len: {variant}") + + +_flux_ae_params = AutoEncoderParams( + resolution=256, + in_channels=3, + ch=128, + out_ch=3, + ch_mult=[1, 2, 4, 4], + num_res_blocks=2, + z_channels=16, + scale_factor=0.3611, + shift_factor=0.1159, +) + +def get_flux_ae_params() -> AutoEncoderParams: + return _flux_ae_params -params = { - "flux-dev": FluxParams( + +_flux_transformer_params: dict[AnyVariant, FluxParams] = { + FluxVariantType.Dev: FluxParams( in_channels=64, vec_in_dim=768, context_in_dim=4096, @@ -78,7 +88,7 @@ class ModelSpec: qkv_bias=True, guidance_embed=True, ), - "flux-schnell": FluxParams( + FluxVariantType.Schnell: FluxParams( in_channels=64, vec_in_dim=768, context_in_dim=4096, @@ -92,7 +102,7 @@ class ModelSpec: qkv_bias=True, guidance_embed=False, ), - "flux-dev-fill": FluxParams( + FluxVariantType.DevFill: FluxParams( in_channels=384, out_channels=64, vec_in_dim=768, @@ -108,3 +118,10 @@ class ModelSpec: guidance_embed=True, ), } + + +def get_flux_transformers_params(variant: AnyVariant): + try: + return _flux_transformer_params[variant] + except KeyError: + raise ValueError(f"Unknown variant for FLUX transformer params: {variant}") diff --git a/invokeai/backend/model_manager/__init__.py b/invokeai/backend/model_manager/__init__.py index dca72f170e0..e69de29bb2d 100644 --- a/invokeai/backend/model_manager/__init__.py +++ b/invokeai/backend/model_manager/__init__.py @@ -1,45 +0,0 @@ -"""Re-export frequently-used symbols from the Model Manager backend.""" - -from invokeai.backend.model_manager.config import ( - AnyModelConfig, - InvalidModelConfigException, - ModelConfigBase, - ModelConfigFactory, -) -from invokeai.backend.model_manager.legacy_probe import ModelProbe -from invokeai.backend.model_manager.load import LoadedModel -from invokeai.backend.model_manager.search import ModelSearch -from invokeai.backend.model_manager.taxonomy import ( - AnyModel, - AnyVariant, - BaseModelType, - ClipVariantType, - ModelFormat, - ModelRepoVariant, - ModelSourceType, - ModelType, - ModelVariantType, - SchedulerPredictionType, - SubModelType, -) - -__all__ = [ - "AnyModelConfig", - "InvalidModelConfigException", - "LoadedModel", - "ModelConfigFactory", - "ModelProbe", - "ModelSearch", - "ModelConfigBase", - "AnyModel", - "AnyVariant", - "BaseModelType", - "ClipVariantType", - "ModelFormat", - "ModelRepoVariant", - "ModelSourceType", - "ModelType", - "ModelVariantType", - "SchedulerPredictionType", - "SubModelType", -] diff --git a/invokeai/backend/model_manager/config.py b/invokeai/backend/model_manager/config.py deleted file mode 100644 index 83f0c1d2bf5..00000000000 --- a/invokeai/backend/model_manager/config.py +++ /dev/null @@ -1,794 +0,0 @@ -# Copyright (c) 2023 Lincoln D. Stein and the InvokeAI Development Team -""" -Configuration definitions for image generation models. - -Typical usage: - - from invokeai.backend.model_manager import ModelConfigFactory - raw = dict(path='models/sd-1/main/foo.ckpt', - name='foo', - base='sd-1', - type='main', - config='configs/stable-diffusion/v1-inference.yaml', - variant='normal', - format='checkpoint' - ) - config = ModelConfigFactory.make_config(raw) - print(config.name) - -Validation errors will raise an InvalidModelConfigException error. 
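Usage sketch for the new `flux/util.py` accessors above; the expected values come straight from the tables in this diff (256/512 max sequence lengths, 384 input channels for the Fill transformer):

```python
from invokeai.backend.flux.util import (
    get_flux_max_seq_length,
    get_flux_transformers_params,
)
from invokeai.backend.model_manager.taxonomy import FluxVariantType

assert get_flux_max_seq_length(FluxVariantType.Schnell) == 256
assert get_flux_max_seq_length(FluxVariantType.Dev) == 512

params = get_flux_transformers_params(FluxVariantType.DevFill)
assert params.in_channels == 384

# Both accessors raise ValueError (not KeyError) for unknown variants.
```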
- -""" - -# pyright: reportIncompatibleVariableOverride=false -import json -import logging -import time -from abc import ABC, abstractmethod -from enum import Enum -from inspect import isabstract -from pathlib import Path -from typing import ClassVar, Literal, Optional, Type, TypeAlias, Union - -from pydantic import BaseModel, ConfigDict, Discriminator, Field, Tag, TypeAdapter -from typing_extensions import Annotated, Any, Dict - -from invokeai.app.services.config.config_default import get_config -from invokeai.app.util.misc import uuid_string -from invokeai.backend.model_hash.hash_validator import validate_hash -from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS -from invokeai.backend.model_manager.model_on_disk import ModelOnDisk -from invokeai.backend.model_manager.omi import flux_dev_1_lora, stable_diffusion_xl_1_lora -from invokeai.backend.model_manager.taxonomy import ( - AnyVariant, - BaseModelType, - ClipVariantType, - FluxLoRAFormat, - ModelFormat, - ModelRepoVariant, - ModelSourceType, - ModelType, - ModelVariantType, - SchedulerPredictionType, - SubModelType, -) -from invokeai.backend.model_manager.util.model_util import lora_token_vector_length -from invokeai.backend.stable_diffusion.schedulers.schedulers import SCHEDULER_NAME_VALUES - -logger = logging.getLogger(__name__) -app_config = get_config() - - -class InvalidModelConfigException(Exception): - """Exception for when config parser doesn't recognize this combination of model type and format.""" - - pass - - -DEFAULTS_PRECISION = Literal["fp16", "fp32"] - - -class SubmodelDefinition(BaseModel): - path_or_prefix: str - model_type: ModelType - variant: AnyVariant = None - - model_config = ConfigDict(protected_namespaces=()) - - -class MainModelDefaultSettings(BaseModel): - vae: str | None = Field(default=None, description="Default VAE for this model (model key)") - vae_precision: DEFAULTS_PRECISION | None = Field(default=None, description="Default VAE precision for this model") - scheduler: SCHEDULER_NAME_VALUES | None = Field(default=None, description="Default scheduler for this model") - steps: int | None = Field(default=None, gt=0, description="Default number of steps for this model") - cfg_scale: float | None = Field(default=None, ge=1, description="Default CFG Scale for this model") - cfg_rescale_multiplier: float | None = Field( - default=None, ge=0, lt=1, description="Default CFG Rescale Multiplier for this model" - ) - width: int | None = Field(default=None, multiple_of=8, ge=64, description="Default width for this model") - height: int | None = Field(default=None, multiple_of=8, ge=64, description="Default height for this model") - guidance: float | None = Field(default=None, ge=1, description="Default Guidance for this model") - - model_config = ConfigDict(extra="forbid") - - -class LoraModelDefaultSettings(BaseModel): - weight: float | None = Field(default=None, ge=-1, le=2, description="Default weight for this model") - model_config = ConfigDict(extra="forbid") - - -class ControlAdapterDefaultSettings(BaseModel): - # This could be narrowed to controlnet processor nodes, but they change. Leaving this a string is safer. 
- preprocessor: str | None - model_config = ConfigDict(extra="forbid") - - -class MatchSpeed(int, Enum): - """Represents the estimated runtime speed of a config's 'matches' method.""" - - FAST = 0 - MED = 1 - SLOW = 2 - - -class LegacyProbeMixin: - """Mixin for classes using the legacy probe for model classification.""" - - @classmethod - def matches(cls, *args, **kwargs): - raise NotImplementedError(f"Method 'matches' not implemented for {cls.__name__}") - - @classmethod - def parse(cls, *args, **kwargs): - raise NotImplementedError(f"Method 'parse' not implemented for {cls.__name__}") - - -class ModelConfigBase(ABC, BaseModel): - """ - Abstract Base class for model configurations. - - To create a new config type, inherit from this class and implement its interface: - - (mandatory) override methods 'matches' and 'parse' - - (mandatory) define fields 'type' and 'format' as class attributes - - - (optional) override method 'get_tag' - - (optional) override field _MATCH_SPEED - - See MinimalConfigExample in test_model_probe.py for an example implementation. - """ - - @staticmethod - def json_schema_extra(schema: dict[str, Any]) -> None: - schema["required"].extend(["key", "base", "type", "format"]) - - model_config = ConfigDict(validate_assignment=True, json_schema_extra=json_schema_extra) - - key: str = Field(description="A unique key for this model.", default_factory=uuid_string) - hash: str = Field(description="The hash of the model file(s).") - path: str = Field( - description="Path to the model on the filesystem. Relative paths are relative to the Invoke root directory." - ) - file_size: int = Field(description="The size of the model in bytes.") - name: str = Field(description="Name of the model.") - type: ModelType = Field(description="Model type") - format: ModelFormat = Field(description="Model format") - base: BaseModelType = Field(description="The base model.") - source: str = Field(description="The original source of the model (path, URL or repo_id).") - source_type: ModelSourceType = Field(description="The type of source") - - description: Optional[str] = Field(description="Model description", default=None) - source_api_response: Optional[str] = Field( - description="The original API response from the source, as stringified JSON.", default=None - ) - cover_image: Optional[str] = Field(description="Url for image to preview model", default=None) - submodels: Optional[Dict[SubModelType, SubmodelDefinition]] = Field( - description="Loadable submodels in this model", default=None - ) - usage_info: Optional[str] = Field(default=None, description="Usage information for this model") - - USING_LEGACY_PROBE: ClassVar[set[Type["ModelConfigBase"]]] = set() - USING_CLASSIFY_API: ClassVar[set[Type["ModelConfigBase"]]] = set() - _MATCH_SPEED: ClassVar[MatchSpeed] = MatchSpeed.MED - - def __init_subclass__(cls, **kwargs): - super().__init_subclass__(**kwargs) - if issubclass(cls, LegacyProbeMixin): - ModelConfigBase.USING_LEGACY_PROBE.add(cls) - # Cannot use `elif isinstance(cls, UnknownModelConfig)` because UnknownModelConfig is not defined yet - else: - ModelConfigBase.USING_CLASSIFY_API.add(cls) - - @staticmethod - def all_config_classes(): - subclasses = ModelConfigBase.USING_LEGACY_PROBE | ModelConfigBase.USING_CLASSIFY_API - concrete = {cls for cls in subclasses if not isabstract(cls)} - return concrete - - @staticmethod - def classify( - mod: str | Path | ModelOnDisk, hash_algo: HASHING_ALGORITHMS = "blake3_single", **overrides - ) -> "AnyModelConfig": - """ - Returns the best matching 
ModelConfig instance from a model's file/folder path. - Raises InvalidModelConfigException if no valid configuration is found. - Created to deprecate ModelProbe.probe - """ - if isinstance(mod, Path | str): - mod = ModelOnDisk(mod, hash_algo) - - candidates = ModelConfigBase.USING_CLASSIFY_API - sorted_by_match_speed = sorted(candidates, key=lambda cls: (cls._MATCH_SPEED, cls.__name__)) - - for config_cls in sorted_by_match_speed: - try: - if not config_cls.matches(mod): - continue - except Exception as e: - logger.warning(f"Unexpected exception while matching {mod.name} to '{config_cls.__name__}': {e}") - continue - else: - return config_cls.from_model_on_disk(mod, **overrides) - - if app_config.allow_unknown_models: - try: - return UnknownModelConfig.from_model_on_disk(mod, **overrides) - except Exception: - # Fall through to raising the exception below - pass - - raise InvalidModelConfigException("Unable to determine model type") - - @classmethod - def get_tag(cls) -> Tag: - type = cls.model_fields["type"].default.value - format = cls.model_fields["format"].default.value - return Tag(f"{type}.{format}") - - @classmethod - @abstractmethod - def parse(cls, mod: ModelOnDisk) -> dict[str, Any]: - """Returns a dictionary with the fields needed to construct the model. - Raises InvalidModelConfigException if the model is invalid. - """ - pass - - @classmethod - @abstractmethod - def matches(cls, mod: ModelOnDisk) -> bool: - """Performs a quick check to determine if the config matches the model. - This doesn't need to be a perfect test - the aim is to eliminate unlikely matches quickly before parsing.""" - pass - - @staticmethod - def cast_overrides(overrides: dict[str, Any]): - """Casts user overrides from str to Enum""" - if "type" in overrides: - overrides["type"] = ModelType(overrides["type"]) - - if "format" in overrides: - overrides["format"] = ModelFormat(overrides["format"]) - - if "base" in overrides: - overrides["base"] = BaseModelType(overrides["base"]) - - if "source_type" in overrides: - overrides["source_type"] = ModelSourceType(overrides["source_type"]) - - if "variant" in overrides: - overrides["variant"] = ModelVariantType(overrides["variant"]) - - @classmethod - def from_model_on_disk(cls, mod: ModelOnDisk, **overrides): - """Creates an instance of this config or raises InvalidModelConfigException.""" - fields = cls.parse(mod) - cls.cast_overrides(overrides) - fields.update(overrides) - - fields["path"] = mod.path.as_posix() - fields["source"] = fields.get("source") or fields["path"] - fields["source_type"] = fields.get("source_type") or ModelSourceType.Path - fields["name"] = fields.get("name") or mod.name - fields["hash"] = fields.get("hash") or mod.hash() - fields["key"] = fields.get("key") or uuid_string() - fields["description"] = fields.get("description") - fields["repo_variant"] = fields.get("repo_variant") or mod.repo_variant() - fields["file_size"] = fields.get("file_size") or mod.size() - - return cls(**fields) - - -class UnknownModelConfig(ModelConfigBase): - base: Literal[BaseModelType.Unknown] = BaseModelType.Unknown - type: Literal[ModelType.Unknown] = ModelType.Unknown - format: Literal[ModelFormat.Unknown] = ModelFormat.Unknown - - @classmethod - def matches(cls, mod: ModelOnDisk) -> bool: - return False - - @classmethod - def parse(cls, mod: ModelOnDisk) -> dict[str, Any]: - return {} - - -class CheckpointConfigBase(ABC, BaseModel): - """Base class for checkpoint-style models.""" - - format: Literal[ModelFormat.Checkpoint, ModelFormat.BnbQuantizednf4b, 
-
-
-class CheckpointConfigBase(ABC, BaseModel):
-    """Base class for checkpoint-style models."""
-
-    format: Literal[ModelFormat.Checkpoint, ModelFormat.BnbQuantizednf4b, ModelFormat.GGUFQuantized] = Field(
-        description="Format of the provided checkpoint model", default=ModelFormat.Checkpoint
-    )
-    config_path: str = Field(description="path to the checkpoint model config file")
-    converted_at: Optional[float] = Field(
-        description="When this model was last converted to diffusers", default_factory=time.time
-    )
-
-
-class DiffusersConfigBase(ABC, BaseModel):
-    """Base class for diffusers-style models."""
-
-    format: Literal[ModelFormat.Diffusers] = ModelFormat.Diffusers
-    repo_variant: Optional[ModelRepoVariant] = ModelRepoVariant.Default
-
-
-class LoRAConfigBase(ABC, BaseModel):
-    """Base class for LoRA models."""
-
-    type: Literal[ModelType.LoRA] = ModelType.LoRA
-    trigger_phrases: Optional[set[str]] = Field(description="Set of trigger phrases for this model", default=None)
-    default_settings: Optional[LoraModelDefaultSettings] = Field(
-        description="Default settings for this model", default=None
-    )
-
-    @classmethod
-    def flux_lora_format(cls, mod: ModelOnDisk):
-        key = "FLUX_LORA_FORMAT"
-        if key in mod.cache:
-            return mod.cache[key]
-
-        from invokeai.backend.patches.lora_conversions.formats import flux_format_from_state_dict
-
-        sd = mod.load_state_dict(mod.path)
-        value = flux_format_from_state_dict(sd, mod.metadata())
-        mod.cache[key] = value
-        return value
-
-    @classmethod
-    def base_model(cls, mod: ModelOnDisk) -> BaseModelType:
-        if cls.flux_lora_format(mod):
-            return BaseModelType.Flux
-
-        state_dict = mod.load_state_dict()
-        # If we've gotten here, we assume that the model is a Stable Diffusion model
-        token_vector_length = lora_token_vector_length(state_dict)
-        if token_vector_length == 768:
-            return BaseModelType.StableDiffusion1
-        elif token_vector_length == 1024:
-            return BaseModelType.StableDiffusion2
-        elif token_vector_length == 1280:
-            return BaseModelType.StableDiffusionXL  # recognizes format at https://civitai.com/models/224641
-        elif token_vector_length == 2048:
-            return BaseModelType.StableDiffusionXL
-        else:
-            raise InvalidModelConfigException("Unknown LoRA type")
-
-
-class T5EncoderConfigBase(ABC, BaseModel):
-    """Base class for diffusers-style models."""
-
-    type: Literal[ModelType.T5Encoder] = ModelType.T5Encoder
-
-
-class T5EncoderConfig(T5EncoderConfigBase, LegacyProbeMixin, ModelConfigBase):
-    format: Literal[ModelFormat.T5Encoder] = ModelFormat.T5Encoder
-
-
-class T5EncoderBnbQuantizedLlmInt8bConfig(T5EncoderConfigBase, LegacyProbeMixin, ModelConfigBase):
-    format: Literal[ModelFormat.BnbQuantizedLlmInt8b] = ModelFormat.BnbQuantizedLlmInt8b
-
-
-class LoRAOmiConfig(LoRAConfigBase, ModelConfigBase):
-    format: Literal[ModelFormat.OMI] = ModelFormat.OMI
-
-    @classmethod
-    def matches(cls, mod: ModelOnDisk) -> bool:
-        if mod.path.is_dir():
-            return False
-
-        metadata = mod.metadata()
-        return (
-            bool(metadata.get("modelspec.sai_model_spec"))
-            and metadata.get("ot_branch") == "omi_format"
-            and metadata["modelspec.architecture"].split("/")[1].lower() == "lora"
-        )
-
-    @classmethod
-    def parse(cls, mod: ModelOnDisk) -> dict[str, Any]:
-        metadata = mod.metadata()
-        architecture = metadata["modelspec.architecture"]
-
-        if architecture == stable_diffusion_xl_1_lora:
-            base = BaseModelType.StableDiffusionXL
-        elif architecture == flux_dev_1_lora:
-            base = BaseModelType.Flux
-        else:
-            raise InvalidModelConfigException(f"Unrecognised/unsupported architecture for OMI LoRA: {architecture}")
-
-        return {"base": base}
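The token-vector-length lookup in LoRAConfigBase.base_model above is the crux of SD LoRA base detection, and the same dimensions reappear in the new config classes. A standalone restatement of that mapping as a sketch (the table and helper name are illustrative; the real code calls lora_token_vector_length on the loaded state dict):

# Illustrative sketch of the dimension-to-base lookup used above.
SD_LORA_TOKEN_DIM_TO_BASE = {
    768: BaseModelType.StableDiffusion1,
    1024: BaseModelType.StableDiffusion2,
    1280: BaseModelType.StableDiffusionXL,  # e.g. https://civitai.com/models/224641
    2048: BaseModelType.StableDiffusionXL,
}

def guess_sd_lora_base(token_vector_length: int) -> BaseModelType:
    try:
        return SD_LORA_TOKEN_DIM_TO_BASE[token_vector_length]
    except KeyError:
        raise InvalidModelConfigException("Unknown LoRA type") from None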
-
-
-class LoRALyCORISConfig(LoRAConfigBase, ModelConfigBase):
-    """Model config for LoRA/Lycoris models."""
-
-    format: Literal[ModelFormat.LyCORIS] = ModelFormat.LyCORIS
-
-    @classmethod
-    def matches(cls, mod: ModelOnDisk) -> bool:
-        if mod.path.is_dir():
-            return False
-
-        # Avoid false positive match against ControlLoRA and Diffusers
-        if cls.flux_lora_format(mod) in [FluxLoRAFormat.Control, FluxLoRAFormat.Diffusers]:
-            return False
-
-        state_dict = mod.load_state_dict()
-        for key in state_dict.keys():
-            if isinstance(key, int):
-                continue
-
-            if key.startswith(("lora_te_", "lora_unet_", "lora_te1_", "lora_te2_", "lora_transformer_")):
-                return True
-            # "lora_A.weight" and "lora_B.weight" are associated with models in PEFT format. We don't support all PEFT
-            # LoRA models, but as of the time of writing, we support Diffusers FLUX PEFT LoRA models.
-            if key.endswith(("to_k_lora.up.weight", "to_q_lora.down.weight", "lora_A.weight", "lora_B.weight")):
-                return True
-
-        return False
-
-    @classmethod
-    def parse(cls, mod: ModelOnDisk) -> dict[str, Any]:
-        return {
-            "base": cls.base_model(mod),
-        }
-
-
-class ControlAdapterConfigBase(ABC, BaseModel):
-    default_settings: Optional[ControlAdapterDefaultSettings] = Field(
-        description="Default settings for this model", default=None
-    )
-
-
-class ControlLoRALyCORISConfig(ControlAdapterConfigBase, LegacyProbeMixin, ModelConfigBase):
-    """Model config for Control LoRA models."""
-
-    type: Literal[ModelType.ControlLoRa] = ModelType.ControlLoRa
-    trigger_phrases: Optional[set[str]] = Field(description="Set of trigger phrases for this model", default=None)
-    format: Literal[ModelFormat.LyCORIS] = ModelFormat.LyCORIS
-
-
-class ControlLoRADiffusersConfig(ControlAdapterConfigBase, LegacyProbeMixin, ModelConfigBase):
-    """Model config for Control LoRA models."""
-
-    type: Literal[ModelType.ControlLoRa] = ModelType.ControlLoRa
-    trigger_phrases: Optional[set[str]] = Field(description="Set of trigger phrases for this model", default=None)
-    format: Literal[ModelFormat.Diffusers] = ModelFormat.Diffusers
-
-
-class LoRADiffusersConfig(LoRAConfigBase, ModelConfigBase):
-    """Model config for LoRA/Diffusers models."""
-
-    format: Literal[ModelFormat.Diffusers] = ModelFormat.Diffusers
-
-    @classmethod
-    def matches(cls, mod: ModelOnDisk) -> bool:
-        if mod.path.is_file():
-            return cls.flux_lora_format(mod) == FluxLoRAFormat.Diffusers
-
-        suffixes = ["bin", "safetensors"]
-        weight_files = [mod.path / f"pytorch_lora_weights.{sfx}" for sfx in suffixes]
-        return any(wf.exists() for wf in weight_files)
-
-    @classmethod
-    def parse(cls, mod: ModelOnDisk) -> dict[str, Any]:
-        return {
-            "base": cls.base_model(mod),
-        }
-
-
-class VAECheckpointConfig(CheckpointConfigBase, LegacyProbeMixin, ModelConfigBase):
-    """Model config for standalone VAE models."""
-
-    type: Literal[ModelType.VAE] = ModelType.VAE
-
-
-class VAEDiffusersConfig(LegacyProbeMixin, ModelConfigBase):
-    """Model config for standalone VAE models (diffusers version)."""
-
-    type: Literal[ModelType.VAE] = ModelType.VAE
-    format: Literal[ModelFormat.Diffusers] = ModelFormat.Diffusers
-
-
-class ControlNetDiffusersConfig(DiffusersConfigBase, ControlAdapterConfigBase, LegacyProbeMixin, ModelConfigBase):
-    """Model config for ControlNet models (diffusers version)."""
-
-    type: Literal[ModelType.ControlNet] = ModelType.ControlNet
-    format: Literal[ModelFormat.Diffusers] = ModelFormat.Diffusers
-
-
-class ControlNetCheckpointConfig(CheckpointConfigBase, ControlAdapterConfigBase, LegacyProbeMixin, ModelConfigBase):
-    """Model config for ControlNet models (diffusers version)."""
-
-    type: Literal[ModelType.ControlNet] = ModelType.ControlNet
-
-
-class TextualInversionFileConfig(LegacyProbeMixin, ModelConfigBase):
-    """Model config for textual inversion embeddings."""
-
-    type: Literal[ModelType.TextualInversion] = ModelType.TextualInversion
-    format: Literal[ModelFormat.EmbeddingFile] = ModelFormat.EmbeddingFile
-
-
-class TextualInversionFolderConfig(LegacyProbeMixin, ModelConfigBase):
-    """Model config for textual inversion embeddings."""
-
-    type: Literal[ModelType.TextualInversion] = ModelType.TextualInversion
-    format: Literal[ModelFormat.EmbeddingFolder] = ModelFormat.EmbeddingFolder
-
-
-class MainConfigBase(ABC, BaseModel):
-    type: Literal[ModelType.Main] = ModelType.Main
-    trigger_phrases: Optional[set[str]] = Field(description="Set of trigger phrases for this model", default=None)
-    default_settings: Optional[MainModelDefaultSettings] = Field(
-        description="Default settings for this model", default=None
-    )
-    variant: AnyVariant = ModelVariantType.Normal
-
-
-class VideoConfigBase(ABC, BaseModel):
-    type: Literal[ModelType.Video] = ModelType.Video
-    trigger_phrases: Optional[set[str]] = Field(description="Set of trigger phrases for this model", default=None)
-    default_settings: Optional[MainModelDefaultSettings] = Field(
-        description="Default settings for this model", default=None
-    )
-    variant: AnyVariant = ModelVariantType.Normal
-
-
-class MainCheckpointConfig(CheckpointConfigBase, MainConfigBase, LegacyProbeMixin, ModelConfigBase):
-    """Model config for main checkpoint models."""
-
-    prediction_type: SchedulerPredictionType = SchedulerPredictionType.Epsilon
-    upcast_attention: bool = False
-
-
-class MainBnbQuantized4bCheckpointConfig(CheckpointConfigBase, MainConfigBase, LegacyProbeMixin, ModelConfigBase):
-    """Model config for main checkpoint models."""
-
-    format: Literal[ModelFormat.BnbQuantizednf4b] = ModelFormat.BnbQuantizednf4b
-    prediction_type: SchedulerPredictionType = SchedulerPredictionType.Epsilon
-    upcast_attention: bool = False
-
-
-class MainGGUFCheckpointConfig(CheckpointConfigBase, MainConfigBase, LegacyProbeMixin, ModelConfigBase):
-    """Model config for main checkpoint models."""
-
-    format: Literal[ModelFormat.GGUFQuantized] = ModelFormat.GGUFQuantized
-    prediction_type: SchedulerPredictionType = SchedulerPredictionType.Epsilon
-    upcast_attention: bool = False
-
-
-class MainDiffusersConfig(DiffusersConfigBase, MainConfigBase, LegacyProbeMixin, ModelConfigBase):
-    """Model config for main diffusers models."""
-
-    pass
-
-
-class IPAdapterConfigBase(ABC, BaseModel):
-    type: Literal[ModelType.IPAdapter] = ModelType.IPAdapter
-
-
-class IPAdapterInvokeAIConfig(IPAdapterConfigBase, LegacyProbeMixin, ModelConfigBase):
-    """Model config for IP Adapter diffusers format models."""
-
-    # TODO(ryand): Should we deprecate this field? From what I can tell, it hasn't been probed correctly for a long
-    # time. Need to go through the history to make sure I'm understanding this fully.
-    image_encoder_model_id: str
-    format: Literal[ModelFormat.InvokeAI] = ModelFormat.InvokeAI
-
-
-class IPAdapterCheckpointConfig(IPAdapterConfigBase, LegacyProbeMixin, ModelConfigBase):
-    """Model config for IP Adapter checkpoint format models."""
-
-    format: Literal[ModelFormat.Checkpoint] = ModelFormat.Checkpoint
-
-
-class CLIPEmbedDiffusersConfig(DiffusersConfigBase):
-    """Model config for Clip Embeddings."""
-
-    variant: ClipVariantType = Field(description="Clip variant for this model")
-    type: Literal[ModelType.CLIPEmbed] = ModelType.CLIPEmbed
-    format: Literal[ModelFormat.Diffusers] = ModelFormat.Diffusers
-
-
-class CLIPGEmbedDiffusersConfig(CLIPEmbedDiffusersConfig, LegacyProbeMixin, ModelConfigBase):
-    """Model config for CLIP-G Embeddings."""
-
-    variant: Literal[ClipVariantType.G] = ClipVariantType.G
-
-    @classmethod
-    def get_tag(cls) -> Tag:
-        return Tag(f"{ModelType.CLIPEmbed.value}.{ModelFormat.Diffusers.value}.{ClipVariantType.G.value}")
-
-
-class CLIPLEmbedDiffusersConfig(CLIPEmbedDiffusersConfig, LegacyProbeMixin, ModelConfigBase):
-    """Model config for CLIP-L Embeddings."""
-
-    variant: Literal[ClipVariantType.L] = ClipVariantType.L
-
-    @classmethod
-    def get_tag(cls) -> Tag:
-        return Tag(f"{ModelType.CLIPEmbed.value}.{ModelFormat.Diffusers.value}.{ClipVariantType.L.value}")
-
-
-class CLIPVisionDiffusersConfig(DiffusersConfigBase, LegacyProbeMixin, ModelConfigBase):
-    """Model config for CLIPVision."""
-
-    type: Literal[ModelType.CLIPVision] = ModelType.CLIPVision
-    format: Literal[ModelFormat.Diffusers] = ModelFormat.Diffusers
-
-
-class T2IAdapterConfig(DiffusersConfigBase, ControlAdapterConfigBase, LegacyProbeMixin, ModelConfigBase):
-    """Model config for T2I."""
-
-    type: Literal[ModelType.T2IAdapter] = ModelType.T2IAdapter
-    format: Literal[ModelFormat.Diffusers] = ModelFormat.Diffusers
-
-
-class SpandrelImageToImageConfig(LegacyProbeMixin, ModelConfigBase):
-    """Model config for Spandrel Image to Image models."""
-
-    _MATCH_SPEED: ClassVar[MatchSpeed] = MatchSpeed.SLOW  # requires loading the model from disk
-
-    type: Literal[ModelType.SpandrelImageToImage] = ModelType.SpandrelImageToImage
-    format: Literal[ModelFormat.Checkpoint] = ModelFormat.Checkpoint
-
-
-class SigLIPConfig(DiffusersConfigBase, LegacyProbeMixin, ModelConfigBase):
-    """Model config for SigLIP."""
-
-    type: Literal[ModelType.SigLIP] = ModelType.SigLIP
-    format: Literal[ModelFormat.Diffusers] = ModelFormat.Diffusers
-
-
-class FluxReduxConfig(LegacyProbeMixin, ModelConfigBase):
-    """Model config for FLUX Tools Redux model."""
-
-    type: Literal[ModelType.FluxRedux] = ModelType.FluxRedux
-    format: Literal[ModelFormat.Checkpoint] = ModelFormat.Checkpoint
-
-
-class LlavaOnevisionConfig(DiffusersConfigBase, ModelConfigBase):
-    """Model config for Llava Onevision models."""
-
-    type: Literal[ModelType.LlavaOnevision] = ModelType.LlavaOnevision
-    format: Literal[ModelFormat.Diffusers] = ModelFormat.Diffusers
-
-    @classmethod
-    def matches(cls, mod: ModelOnDisk) -> bool:
-        if mod.path.is_file():
-            return False
-
-        config_path = mod.path / "config.json"
-        try:
-            with open(config_path, "r") as file:
-                config = json.load(file)
-        except FileNotFoundError:
-            return False
-
-        architectures = config.get("architectures")
-        return architectures and architectures[0] == "LlavaOnevisionForConditionalGeneration"
-
-    @classmethod
-    def parse(cls, mod: ModelOnDisk) -> dict[str, Any]:
-        return {
-            "base": BaseModelType.Any,
-            "variant": ModelVariantType.Normal,
-        }
-
-
-class ApiModelConfig(MainConfigBase, ModelConfigBase):
-    """Model config for API-based models."""
-
-    format: Literal[ModelFormat.Api] = ModelFormat.Api
-
-    @classmethod
-    def matches(cls, mod: ModelOnDisk) -> bool:
-        # API models are not stored on disk, so we can't match them.
-        return False
-
-    @classmethod
-    def parse(cls, mod: ModelOnDisk) -> dict[str, Any]:
-        raise NotImplementedError("API models are not parsed from disk.")
-
-
-class VideoApiModelConfig(VideoConfigBase, ModelConfigBase):
-    """Model config for API-based video models."""
-
-    format: Literal[ModelFormat.Api] = ModelFormat.Api
-
-    @classmethod
-    def matches(cls, mod: ModelOnDisk) -> bool:
-        # API models are not stored on disk, so we can't match them.
-        return False
-
-    @classmethod
-    def parse(cls, mod: ModelOnDisk) -> dict[str, Any]:
-        raise NotImplementedError("API models are not parsed from disk.")
-
-
-def get_model_discriminator_value(v: Any) -> str:
-    """
-    Computes the discriminator value for a model config.
-    https://docs.pydantic.dev/latest/concepts/unions/#discriminated-unions-with-callable-discriminator
-    """
-    format_ = type_ = variant_ = None
-
-    if isinstance(v, dict):
-        format_ = v.get("format")
-        if isinstance(format_, Enum):
-            format_ = format_.value
-
-        type_ = v.get("type")
-        if isinstance(type_, Enum):
-            type_ = type_.value
-
-        variant_ = v.get("variant")
-        if isinstance(variant_, Enum):
-            variant_ = variant_.value
-    else:
-        format_ = v.format.value
-        type_ = v.type.value
-        variant_ = getattr(v, "variant", None)
-        if variant_:
-            variant_ = variant_.value
-
-    # Ideally, each config would be uniquely identified with a combination of fields
-    # i.e. (type, format, variant) without any special cases. Alas...
-
-    # Previously, CLIPEmbed did not have any variants, meaning older database entries lack a variant field.
-    # To maintain compatibility, we default to ClipVariantType.L in this case.
-    if type_ == ModelType.CLIPEmbed.value and format_ == ModelFormat.Diffusers.value:
-        variant_ = variant_ or ClipVariantType.L.value
-        return f"{type_}.{format_}.{variant_}"
-    return f"{type_}.{format_}"
-
-
-# The types are listed explicitly because IDEs/LSPs can't identify the correct types
-# when AnyModelConfig is constructed dynamically using ModelConfigBase.all_config_classes
-AnyModelConfig = Annotated[
-    Union[
-        Annotated[MainDiffusersConfig, MainDiffusersConfig.get_tag()],
-        Annotated[MainCheckpointConfig, MainCheckpointConfig.get_tag()],
-        Annotated[MainBnbQuantized4bCheckpointConfig, MainBnbQuantized4bCheckpointConfig.get_tag()],
-        Annotated[MainGGUFCheckpointConfig, MainGGUFCheckpointConfig.get_tag()],
-        Annotated[VAEDiffusersConfig, VAEDiffusersConfig.get_tag()],
-        Annotated[VAECheckpointConfig, VAECheckpointConfig.get_tag()],
-        Annotated[ControlNetDiffusersConfig, ControlNetDiffusersConfig.get_tag()],
-        Annotated[ControlNetCheckpointConfig, ControlNetCheckpointConfig.get_tag()],
-        Annotated[LoRALyCORISConfig, LoRALyCORISConfig.get_tag()],
-        Annotated[LoRAOmiConfig, LoRAOmiConfig.get_tag()],
-        Annotated[ControlLoRALyCORISConfig, ControlLoRALyCORISConfig.get_tag()],
-        Annotated[ControlLoRADiffusersConfig, ControlLoRADiffusersConfig.get_tag()],
-        Annotated[LoRADiffusersConfig, LoRADiffusersConfig.get_tag()],
-        Annotated[T5EncoderConfig, T5EncoderConfig.get_tag()],
-        Annotated[T5EncoderBnbQuantizedLlmInt8bConfig, T5EncoderBnbQuantizedLlmInt8bConfig.get_tag()],
-        Annotated[TextualInversionFileConfig, TextualInversionFileConfig.get_tag()],
-        Annotated[TextualInversionFolderConfig, TextualInversionFolderConfig.get_tag()],
-        Annotated[IPAdapterInvokeAIConfig, IPAdapterInvokeAIConfig.get_tag()],
-        Annotated[IPAdapterCheckpointConfig, IPAdapterCheckpointConfig.get_tag()],
-        Annotated[T2IAdapterConfig, T2IAdapterConfig.get_tag()],
-        Annotated[SpandrelImageToImageConfig, SpandrelImageToImageConfig.get_tag()],
-        Annotated[CLIPVisionDiffusersConfig, CLIPVisionDiffusersConfig.get_tag()],
-        Annotated[CLIPLEmbedDiffusersConfig, CLIPLEmbedDiffusersConfig.get_tag()],
-        Annotated[CLIPGEmbedDiffusersConfig, CLIPGEmbedDiffusersConfig.get_tag()],
-        Annotated[SigLIPConfig, SigLIPConfig.get_tag()],
-        Annotated[FluxReduxConfig, FluxReduxConfig.get_tag()],
-        Annotated[LlavaOnevisionConfig, LlavaOnevisionConfig.get_tag()],
-        Annotated[ApiModelConfig, ApiModelConfig.get_tag()],
-        Annotated[VideoApiModelConfig, VideoApiModelConfig.get_tag()],
-        Annotated[UnknownModelConfig, UnknownModelConfig.get_tag()],
-    ],
-    Discriminator(get_model_discriminator_value),
-]
-
-AnyModelConfigValidator = TypeAdapter(AnyModelConfig)
-AnyDefaultSettings: TypeAlias = Union[MainModelDefaultSettings, LoraModelDefaultSettings, ControlAdapterDefaultSettings]
-
-
-class ModelConfigFactory:
-    @staticmethod
-    def make_config(model_data: Dict[str, Any], timestamp: Optional[float] = None) -> AnyModelConfig:
-        """Return the appropriate config object from raw dict values."""
-        model = AnyModelConfigValidator.validate_python(model_data)  # type: ignore
-        if isinstance(model, CheckpointConfigBase) and timestamp:
-            model.converted_at = timestamp
-        validate_hash(model.hash)
-        return model  # type: ignore
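The deleted discriminator above joined the type and format values into a dot-separated tag, with CLIP Embed as the lone variant-bearing special case. A hedged illustration, assuming the usual taxonomy enum string values ("main", "checkpoint", "clip_embed", "diffusers"):

# Illustration only; enum string values are assumptions from taxonomy.py.
get_model_discriminator_value({"type": "main", "format": "checkpoint"})
# -> "main.checkpoint"

# Legacy CLIP Embed rows may lack a variant; the old code defaulted to CLIP-L:
get_model_discriminator_value({"type": "clip_embed", "format": "diffusers"})
# -> f"clip_embed.diffusers.{ClipVariantType.L.value}"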
diff --git a/invokeai/backend/model_manager/configs/__init__.py b/invokeai/backend/model_manager/configs/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/invokeai/backend/model_manager/configs/base.py b/invokeai/backend/model_manager/configs/base.py
new file mode 100644
index 00000000000..e67efd20097
--- /dev/null
+++ b/invokeai/backend/model_manager/configs/base.py
@@ -0,0 +1,243 @@
+from abc import ABC, abstractmethod
+from enum import Enum
+from inspect import isabstract
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    ClassVar,
+    Literal,
+    Self,
+    Type,
+)
+
+from pydantic import BaseModel, ConfigDict, Field, Tag
+from pydantic_core import PydanticUndefined
+
+from invokeai.app.util.misc import uuid_string
+from invokeai.backend.model_manager.model_on_disk import ModelOnDisk
+from invokeai.backend.model_manager.taxonomy import (
+    AnyVariant,
+    BaseModelType,
+    ModelFormat,
+    ModelRepoVariant,
+    ModelSourceType,
+    ModelType,
+)
+
+if TYPE_CHECKING:
+    pass
+
+
+class Config_Base(ABC, BaseModel):
+    """
+    Abstract base class for model configurations. A model config describes a specific combination of model base, type
+    and format, along with other metadata about the model. For example, a Stable Diffusion 1.x main model in
+    checkpoint format would have base=sd-1, type=main, format=checkpoint.
+
+    To create a new config type, inherit from this class and implement its interface:
+    - Define method 'from_model_on_disk' that returns an instance of the class or raises NotAMatchError. This method
+      will be called during model installation to determine the correct config class for a model.
+    - Define fields 'type', 'base' and 'format' as pydantic fields. These should be Literals with a single value. A
+      default must be provided for each of these fields.
+
+    If multiple combinations of base, type and format need to be supported, create a separate subclass for each.
+
+    See MinimalConfigExample in test_model_probe.py for an example implementation.
+    """
+
+    # These fields are common to all model configs.
+
+    key: str = Field(
+        default_factory=uuid_string,
+        description="A unique key for this model.",
+    )
+    hash: str = Field(
+        description="The hash of the model file(s).",
+    )
+    path: str = Field(
+        description="Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.",
+    )
+    file_size: int = Field(
+        description="The size of the model in bytes.",
+    )
+    name: str = Field(
+        description="Name of the model.",
+    )
+    description: str | None = Field(
+        default=None,
+        description="Model description",
+    )
+    source: str = Field(
+        description="The original source of the model (path, URL or repo_id).",
+    )
+    source_type: ModelSourceType = Field(
+        description="The type of source",
+    )
+    source_api_response: str | None = Field(
+        default=None,
+        description="The original API response from the source, as stringified JSON.",
+    )
+    cover_image: str | None = Field(
+        default=None,
+        description="Url for image to preview model",
+    )
+    usage_info: str | None = Field(
+        default=None,
+        description="Usage information for this model",
+    )
+
+    CONFIG_CLASSES: ClassVar[set[Type["Config_Base"]]] = set()
+    """Set of all non-abstract subclasses of Config_Base, for use during model probing. In other words, this is the
+    set of all known model config types."""
+
+    model_config = ConfigDict(
+        validate_assignment=True,
+        json_schema_serialization_defaults_required=True,
+        json_schema_mode_override="serialization",
+    )
+
+    @classmethod
+    def __init_subclass__(cls, **kwargs):
+        super().__init_subclass__(**kwargs)
+        # Register non-abstract subclasses so we can iterate over them later during model probing. Note that
+        # isabstract() will return False if the class does not have any abstract methods, even if it inherits from ABC.
+        # We must check for ABC lest we unintentionally register some abstract model config classes.
+        if not isabstract(cls) and ABC not in cls.__bases__:
+            cls.CONFIG_CLASSES.add(cls)
+
+    @classmethod
+    def __pydantic_init_subclass__(cls, **kwargs):
+        # Ensure that model configs define 'base', 'type' and 'format' fields and provide defaults for them. Each
+        # subclass is expected to represent a single combination of base, type and format.
+        #
+        # This pydantic dunder method is called after the pydantic model for a class is created. The normal
+        # __init_subclass__ is too early to do this check.
+        for name in ("type", "base", "format"):
+            if name not in cls.model_fields:
+                raise NotImplementedError(f"{cls.__name__} must define a '{name}' field")
+            if cls.model_fields[name].default is PydanticUndefined:
+                raise NotImplementedError(f"{cls.__name__} must define a default for the '{name}' field")
+
+    @classmethod
+    def get_tag(cls) -> Tag:
+        """Constructs a pydantic discriminated union tag for this model config class. When a config is deserialized,
+        pydantic uses the tag to determine which subclass to instantiate.
+
+        The tag is a dot-separated string of the type, format, base and variant (if applicable).
+        """
+        tag_strings: list[str] = []
+        for name in ("type", "format", "base", "variant"):
+            if field := cls.model_fields.get(name):
+                # The check in __pydantic_init_subclass__ ensures that type, format and base are always present with
+                # defaults. variant does not require a default, but if it has one, we need to add it to the tag. We can
+                # check for the presence of a default by seeing if it's not PydanticUndefined, a sentinel value used by
+                # pydantic to indicate that no default was provided.
+                if field.default is not PydanticUndefined:
+                    # We expect each of these fields has an Enum for its default; we want the value of the enum.
+                    tag_strings.append(field.default.value)
+        return Tag(".".join(tag_strings))
+
+    @staticmethod
+    def get_model_discriminator_value(v: Any) -> str:
+        """Computes the discriminator value for a model config discriminated union."""
+        # This is called by pydantic during deserialization and serialization to determine which model the data
+        # represents. It can get either a dict (during deserialization) or an instance of a Config_Base subclass
+        # (during serialization).
+        #
+        # See: https://docs.pydantic.dev/latest/concepts/unions/#discriminated-unions-with-callable-discriminator
+        if isinstance(v, Config_Base):
+            # We have an instance of a Config_Base subclass - use its tag directly.
+            return v.get_tag().tag
+        if isinstance(v, dict):
+            # We have a dict - attempt to compute a tag from its fields.
+            tag_strings: list[str] = []
+            if type_ := v.get("type"):
+                if isinstance(type_, Enum):
+                    type_ = str(type_.value)
+                elif not isinstance(type_, str):
+                    raise TypeError("Model config dict 'type' field must be a string or Enum")
+                tag_strings.append(type_)
+
+            if format_ := v.get("format"):
+                if isinstance(format_, Enum):
+                    format_ = str(format_.value)
+                elif not isinstance(format_, str):
+                    raise TypeError("Model config dict 'format' field must be a string or Enum")
+                tag_strings.append(format_)
+
+            if base_ := v.get("base"):
+                if isinstance(base_, Enum):
+                    base_ = str(base_.value)
+                elif not isinstance(base_, str):
+                    raise TypeError("Model config dict 'base' field must be a string or Enum")
+                tag_strings.append(base_)
+
+            # Special case: CLIP Embed models also need the variant to distinguish them.
+            if (
+                type_ == ModelType.CLIPEmbed.value
+                and format_ == ModelFormat.Diffusers.value
+                and base_ == BaseModelType.Any.value
+            ):
+                if variant_ := v.get("variant"):
+                    if isinstance(variant_, Enum):
+                        variant_ = variant_.value
+                    elif not isinstance(variant_, str):
+                        raise TypeError("Model config dict 'variant' field must be a string or Enum")
+                    tag_strings.append(variant_)
+                else:
+                    raise ValueError("CLIP Embed model config dict must include a 'variant' field")
+
+            return ".".join(tag_strings)
+        else:
+            raise TypeError("Model config discriminator value must be computed from a dict or Config_Base instance")
+
+    @classmethod
+    @abstractmethod
+    def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self:
+        """Given the model on disk and any override fields, attempt to construct an instance of this config class.
+
+        This method serves to identify whether the model on disk matches this config class, and if so, to extract any
+        additional metadata needed to instantiate the config.
+
+        Implementations should raise a NotAMatchError if the model does not match this config class."""
+        raise NotImplementedError(f"from_model_on_disk not implemented for {cls.__name__}")
+
+
+class Checkpoint_Config_Base(ABC, BaseModel):
+    """Base class for checkpoint-style models."""
+
+    config_path: str | None = Field(
+        description="Path to the config for this model, if any.",
+        default=None,
+    )
+
+
+class Diffusers_Config_Base(ABC, BaseModel):
+    """Base class for diffusers-style models."""
+
+    format: Literal[ModelFormat.Diffusers] = Field(default=ModelFormat.Diffusers)
+    repo_variant: ModelRepoVariant = Field(ModelRepoVariant.Default)
+
+    @classmethod
+    def _get_repo_variant_or_raise(cls, mod: ModelOnDisk) -> ModelRepoVariant:
+        # get all files ending in .bin or .safetensors
+        weight_files = list(mod.path.glob("**/*.safetensors"))
+        weight_files.extend(list(mod.path.glob("**/*.bin")))
+        for x in weight_files:
+            if ".fp16" in x.suffixes:
+                return ModelRepoVariant.FP16
+            if "openvino_model" in x.name:
+                return ModelRepoVariant.OpenVINO
+            if "flax_model" in x.name:
+                return ModelRepoVariant.Flax
+            if x.suffix == ".onnx":
+                return ModelRepoVariant.ONNX
+        return ModelRepoVariant.Default
+
+
+class SubmodelDefinition(BaseModel):
+    path_or_prefix: str
+    model_type: ModelType
+    variant: AnyVariant | None = None
+
+    model_config = ConfigDict(protected_namespaces=())
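Under the new scheme, a config class is a single (base, type, format) combination with matching baked into from_model_on_disk. A minimal sketch of such a subclass, with illustrative names and a deliberately trivial match check (the real reference is MinimalConfigExample in test_model_probe.py):

# Illustrative sketch only; real configs live in the sibling modules and use
# the shared identification_utils helpers.
class Example_Checkpoint_Config(Config_Base):
    base: Literal[BaseModelType.Any] = Field(default=BaseModelType.Any)
    type: Literal[ModelType.VAE] = Field(default=ModelType.VAE)
    format: Literal[ModelFormat.Checkpoint] = Field(default=ModelFormat.Checkpoint)

    @classmethod
    def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self:
        # Reject anything that is not a single-file checkpoint.
        if not mod.path.is_file():
            raise NotAMatchError("not a single-file checkpoint")
        return cls(**override_fields)

Its get_tag() joins the type, format and base defaults, so the tag would come out as something like "vae.checkpoint.any", assuming the usual enum string values.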
diff --git a/invokeai/backend/model_manager/configs/clip_embed.py b/invokeai/backend/model_manager/configs/clip_embed.py
new file mode 100644
index 00000000000..4bb24a0a637
--- /dev/null
+++ b/invokeai/backend/model_manager/configs/clip_embed.py
@@ -0,0 +1,91 @@
+from typing import (
+    Literal,
+    Self,
+)
+
+from pydantic import Field
+from typing_extensions import Any
+
+from invokeai.backend.model_manager.configs.base import Config_Base, Diffusers_Config_Base
+from invokeai.backend.model_manager.configs.identification_utils import (
+    NotAMatchError,
+    get_config_dict_or_raise,
+    raise_for_class_name,
+    raise_for_override_fields,
+    raise_if_not_dir,
+)
+from invokeai.backend.model_manager.model_on_disk import ModelOnDisk
+from invokeai.backend.model_manager.taxonomy import (
+    BaseModelType,
+    ClipVariantType,
+    ModelFormat,
+    ModelType,
+)
+
+
+def get_clip_variant_type_from_config(config: dict[str, Any]) -> ClipVariantType | None:
+    try:
+        hidden_size = config.get("hidden_size")
+        match hidden_size:
+            case 1280:
+                return ClipVariantType.G
+            case 768:
+                return ClipVariantType.L
+            case _:
+                return None
+    except Exception:
+        return None
+
+
+class CLIPEmbed_Diffusers_Config_Base(Diffusers_Config_Base):
+    base: Literal[BaseModelType.Any] = Field(default=BaseModelType.Any)
+    type: Literal[ModelType.CLIPEmbed] = Field(default=ModelType.CLIPEmbed)
+    format: Literal[ModelFormat.Diffusers] = Field(default=ModelFormat.Diffusers)
+
+    @classmethod
+    def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self:
+        raise_if_not_dir(mod)
+
+        raise_for_override_fields(cls, override_fields)
+
+        raise_for_class_name(
+            {
+                mod.path / "config.json",
+                mod.path / "text_encoder" / "config.json",
+            },
+            {
+                "CLIPModel",
+                "CLIPTextModel",
+                "CLIPTextModelWithProjection",
+            },
+        )
+
+        cls._validate_variant(mod)
+
+        return cls(**override_fields)
+
+    @classmethod
+    def _validate_variant(cls, mod: ModelOnDisk) -> None:
+        """Raise `NotAMatchError` if the model variant does not match this config class."""
+        expected_variant = cls.model_fields["variant"].default
+        config = get_config_dict_or_raise(
+            {
+                mod.path / "config.json",
+                mod.path / "text_encoder" / "config.json",
+            },
+        )
+        recognized_variant = get_clip_variant_type_from_config(config)
+
+        if recognized_variant is None:
+            raise NotAMatchError("unable to determine CLIP variant from config")
+
+        if expected_variant is not recognized_variant:
+            raise NotAMatchError(f"variant is {recognized_variant}, not {expected_variant}")
+
+
+class CLIPEmbed_Diffusers_G_Config(CLIPEmbed_Diffusers_Config_Base, Config_Base):
+    variant: Literal[ClipVariantType.G] = Field(default=ClipVariantType.G)
+
+
+class CLIPEmbed_Diffusers_L_Config(CLIPEmbed_Diffusers_Config_Base, Config_Base):
+    variant: Literal[ClipVariantType.L] = Field(default=ClipVariantType.L)
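The hidden-size heuristic is easy to check by hand; the values below come straight from the match statement above:

# The transformer config's hidden_size distinguishes CLIP-L from CLIP-G.
assert get_clip_variant_type_from_config({"hidden_size": 768}) is ClipVariantType.L
assert get_clip_variant_type_from_config({"hidden_size": 1280}) is ClipVariantType.G
# Anything else is unrecognized, and _validate_variant raises NotAMatchError.
assert get_clip_variant_type_from_config({"hidden_size": 1024}) is None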
diff --git a/invokeai/backend/model_manager/configs/clip_vision.py b/invokeai/backend/model_manager/configs/clip_vision.py
new file mode 100644
index 00000000000..af5a539bc18
--- /dev/null
+++ b/invokeai/backend/model_manager/configs/clip_vision.py
@@ -0,0 +1,57 @@
+from typing import (
+    Literal,
+    Self,
+)
+
+from pydantic import Field
+from typing_extensions import Any
+
+from invokeai.backend.model_manager.configs.base import Config_Base, Diffusers_Config_Base
+from invokeai.backend.model_manager.configs.identification_utils import (
+    NotAMatchError,
+    get_class_name_from_config_dict_or_raise,
+    get_config_dict_or_raise,
+    raise_for_override_fields,
+    raise_if_not_dir,
+)
+from invokeai.backend.model_manager.model_on_disk import ModelOnDisk
+from invokeai.backend.model_manager.taxonomy import (
+    BaseModelType,
+    ModelFormat,
+    ModelType,
+)
+
+
+class CLIPVision_Diffusers_Config(Diffusers_Config_Base, Config_Base):
+    """Model config for CLIPVision."""
+
+    base: Literal[BaseModelType.Any] = Field(default=BaseModelType.Any)
+    type: Literal[ModelType.CLIPVision] = Field(default=ModelType.CLIPVision)
+    format: Literal[ModelFormat.Diffusers] = Field(default=ModelFormat.Diffusers)
+
+    @classmethod
+    def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self:
+        raise_if_not_dir(mod)
+
+        raise_for_override_fields(cls, override_fields)
+
+        cls.raise_if_config_doesnt_look_like_clip_vision(mod)
+
+        return cls(**override_fields)
+
+    @classmethod
+    def raise_if_config_doesnt_look_like_clip_vision(cls, mod: ModelOnDisk) -> None:
+        config_dict = get_config_dict_or_raise(mod.path / "config.json")
+        class_name = get_class_name_from_config_dict_or_raise(config_dict)
+
+        if class_name == "CLIPVisionModelWithProjection":
+            looks_like_clip_vision = True
+        elif class_name == "CLIPModel" and "vision_config" in config_dict:
+            looks_like_clip_vision = True
+        else:
+            looks_like_clip_vision = False
+
+        if not looks_like_clip_vision:
+            raise NotAMatchError(
+                f"config class name is {class_name}, not CLIPVisionModelWithProjection or CLIPModel with vision_config"
+            )
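A combined CLIPModel checkpoint bundles both text and vision towers, so the class name alone is ambiguous; the vision_config key is the tiebreaker. The same predicate restated as a pure function, assuming the class name has already been extracted from the config (the real code uses get_class_name_from_config_dict_or_raise for that):

# Illustrative re-statement of the check above.
def looks_like_clip_vision(class_name: str, config_dict: dict[str, Any]) -> bool:
    if class_name == "CLIPVisionModelWithProjection":
        return True
    # Only treat a full CLIPModel as CLIP Vision when its config carries a
    # vision_config section.
    return class_name == "CLIPModel" and "vision_config" in config_dict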
diff --git a/invokeai/backend/model_manager/configs/controlnet.py b/invokeai/backend/model_manager/configs/controlnet.py
new file mode 100644
index 00000000000..630e81fd243
--- /dev/null
+++ b/invokeai/backend/model_manager/configs/controlnet.py
@@ -0,0 +1,230 @@
+from typing import (
+    Literal,
+    Self,
+)
+
+from pydantic import BaseModel, ConfigDict, Field
+from typing_extensions import Any
+
+from invokeai.backend.flux.controlnet.state_dict_utils import (
+    is_state_dict_instantx_controlnet,
+    is_state_dict_xlabs_controlnet,
+)
+from invokeai.backend.model_manager.configs.base import Checkpoint_Config_Base, Config_Base, Diffusers_Config_Base
+from invokeai.backend.model_manager.configs.identification_utils import (
+    NotAMatchError,
+    common_config_paths,
+    get_config_dict_or_raise,
+    raise_for_class_name,
+    raise_for_override_fields,
+    raise_if_not_dir,
+    raise_if_not_file,
+    state_dict_has_any_keys_starting_with,
+)
+from invokeai.backend.model_manager.model_on_disk import ModelOnDisk
+from invokeai.backend.model_manager.taxonomy import (
+    BaseModelType,
+    ModelFormat,
+    ModelType,
+)
+
+MODEL_NAME_TO_PREPROCESSOR = {
+    "canny": "canny_image_processor",
+    "mlsd": "mlsd_image_processor",
+    "depth": "depth_anything_image_processor",
+    "bae": "normalbae_image_processor",
+    "normal": "normalbae_image_processor",
+    "sketch": "pidi_image_processor",
+    "scribble": "lineart_image_processor",
+    "lineart anime": "lineart_anime_image_processor",
+    "lineart_anime": "lineart_anime_image_processor",
+    "lineart": "lineart_image_processor",
+    "soft": "hed_image_processor",
+    "softedge": "hed_image_processor",
+    "hed": "hed_image_processor",
+    "shuffle": "content_shuffle_image_processor",
+    "pose": "dw_openpose_image_processor",
+    "mediapipe": "mediapipe_face_processor",
+    "pidi": "pidi_image_processor",
+    "zoe": "zoe_depth_image_processor",
+    "color": "color_map_image_processor",
+}
+
+
+class ControlAdapterDefaultSettings(BaseModel):
+    # This could be narrowed to controlnet processor nodes, but they change. Leaving this a string is safer.
+    preprocessor: str | None
+    model_config = ConfigDict(extra="forbid")
+
+    @classmethod
+    def from_model_name(cls, model_name: str) -> Self:
+        for k, v in MODEL_NAME_TO_PREPROCESSOR.items():
+            model_name_lower = model_name.lower()
+            if k in model_name_lower:
+                return cls(preprocessor=v)
+        return cls(preprocessor=None)
+
+
+class ControlNet_Diffusers_Config_Base(Diffusers_Config_Base):
+    """Model config for ControlNet models (diffusers version)."""
+
+    type: Literal[ModelType.ControlNet] = Field(default=ModelType.ControlNet)
+    format: Literal[ModelFormat.Diffusers] = Field(default=ModelFormat.Diffusers)
+    default_settings: ControlAdapterDefaultSettings | None = Field(None)
+
+    @classmethod
+    def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self:
+        raise_if_not_dir(mod)
+
+        raise_for_override_fields(cls, override_fields)
+
+        raise_for_class_name(
+            common_config_paths(mod.path),
+            {
+                "ControlNetModel",
+                "FluxControlNetModel",
+            },
+        )
+
+        cls._validate_base(mod)
+
+        return cls(**override_fields)
+
+    @classmethod
+    def _validate_base(cls, mod: ModelOnDisk) -> None:
+        """Raise `NotAMatchError` if the model base does not match this config class."""
+        expected_base = cls.model_fields["base"].default
+        recognized_base = cls._get_base_or_raise(mod)
+        if expected_base is not recognized_base:
+            raise NotAMatchError(f"base is {recognized_base}, not {expected_base}")
+
+    @classmethod
+    def _get_base_or_raise(cls, mod: ModelOnDisk) -> BaseModelType:
+        config_dict = get_config_dict_or_raise(common_config_paths(mod.path))
+
+        if config_dict.get("_class_name") == "FluxControlNetModel":
+            return BaseModelType.Flux
+
+        dimension = config_dict.get("cross_attention_dim")
+
+        match dimension:
+            case 768:
+                return BaseModelType.StableDiffusion1
+            case 1024:
+                # No obvious way to distinguish between sd2-base and sd2-768, but we don't really differentiate them
+                # anyway.
+                return BaseModelType.StableDiffusion2
+            case 2048:
+                return BaseModelType.StableDiffusionXL
+            case _:
+                raise NotAMatchError(f"unrecognized cross_attention_dim {dimension}")
+
+
+class ControlNet_Diffusers_SD1_Config(ControlNet_Diffusers_Config_Base, Config_Base):
+    base: Literal[BaseModelType.StableDiffusion1] = Field(default=BaseModelType.StableDiffusion1)
+
+
+class ControlNet_Diffusers_SD2_Config(ControlNet_Diffusers_Config_Base, Config_Base):
+    base: Literal[BaseModelType.StableDiffusion2] = Field(default=BaseModelType.StableDiffusion2)
+
+
+class ControlNet_Diffusers_SDXL_Config(ControlNet_Diffusers_Config_Base, Config_Base):
+    base: Literal[BaseModelType.StableDiffusionXL] = Field(default=BaseModelType.StableDiffusionXL)
+
+
+class ControlNet_Diffusers_FLUX_Config(ControlNet_Diffusers_Config_Base, Config_Base):
+    base: Literal[BaseModelType.Flux] = Field(default=BaseModelType.Flux)
+
+
+class ControlNet_Checkpoint_Config_Base(Checkpoint_Config_Base):
+    """Model config for ControlNet models (checkpoint version)."""
+
+    type: Literal[ModelType.ControlNet] = Field(default=ModelType.ControlNet)
+    format: Literal[ModelFormat.Checkpoint] = Field(default=ModelFormat.Checkpoint)
+    default_settings: ControlAdapterDefaultSettings | None = Field(None)
+
+    @classmethod
+    def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self:
+        raise_if_not_file(mod)
+
+        raise_for_override_fields(cls, override_fields)
+
+        cls._validate_looks_like_controlnet(mod)
+
+        cls._validate_base(mod)
+
+        return cls(**override_fields)
+
+    @classmethod
+    def _validate_base(cls, mod: ModelOnDisk) -> None:
+        """Raise `NotAMatchError` if the model base does not match this config class."""
+        expected_base = cls.model_fields["base"].default
+        recognized_base = cls._get_base_or_raise(mod)
+        if expected_base is not recognized_base:
+            raise NotAMatchError(f"base is {recognized_base}, not {expected_base}")
+
+    @classmethod
+    def _validate_looks_like_controlnet(cls, mod: ModelOnDisk) -> None:
+        if not state_dict_has_any_keys_starting_with(
+            mod.load_state_dict(),
+            {
+                "controlnet",
+                "control_model",
+                "input_blocks",
+                # XLabs FLUX ControlNet models have keys starting with "controlnet_blocks."
+                # For example: https://huggingface.co/XLabs-AI/flux-controlnet-collections/blob/86ab1e915a389d5857135c00e0d350e9e38a9048/flux-canny-controlnet_v2.safetensors
+                # TODO(ryand): This is very fragile. XLabs FLUX ControlNet models also contain keys starting with
+                # "double_blocks.", which we check for above. But, I'm afraid to modify this logic because it is so
+                # delicate.
+                "controlnet_blocks",
+            },
+        ):
+            raise NotAMatchError("state dict does not look like a ControlNet checkpoint")
+
+    @classmethod
+    def _get_base_or_raise(cls, mod: ModelOnDisk) -> BaseModelType:
+        state_dict = mod.load_state_dict()
+
+        if is_state_dict_xlabs_controlnet(state_dict) or is_state_dict_instantx_controlnet(state_dict):
+            # TODO(ryand): Should I distinguish between XLabs, InstantX and other ControlNet models by implementing
+            # get_format()?
+            return BaseModelType.Flux
+
+        for key in (
+            "control_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight",
+            "controlnet_mid_block.bias",
+            "input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight",
+            "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k.weight",
+        ):
+            if key not in state_dict:
+                continue
+            width = state_dict[key].shape[-1]
+            match width:
+                case 768:
+                    return BaseModelType.StableDiffusion1
+                case 1024:
+                    return BaseModelType.StableDiffusion2
+                case 2048:
+                    return BaseModelType.StableDiffusionXL
+                case 1280:
+                    return BaseModelType.StableDiffusionXL
+                case _:
+                    pass
+
+        raise NotAMatchError("unable to determine base type from state dict")
+
+
+class ControlNet_Checkpoint_SD1_Config(ControlNet_Checkpoint_Config_Base, Config_Base):
+    base: Literal[BaseModelType.StableDiffusion1] = Field(default=BaseModelType.StableDiffusion1)
+
+
+class ControlNet_Checkpoint_SD2_Config(ControlNet_Checkpoint_Config_Base, Config_Base):
+    base: Literal[BaseModelType.StableDiffusion2] = Field(default=BaseModelType.StableDiffusion2)
+
+
+class ControlNet_Checkpoint_SDXL_Config(ControlNet_Checkpoint_Config_Base, Config_Base):
+    base: Literal[BaseModelType.StableDiffusionXL] = Field(default=BaseModelType.StableDiffusionXL)
+
+
+class ControlNet_Checkpoint_FLUX_Config(ControlNet_Checkpoint_Config_Base, Config_Base):
+    base: Literal[BaseModelType.Flux] = Field(default=BaseModelType.Flux)
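The MODEL_NAME_TO_PREPROCESSOR table above drives default preprocessor selection by substring match against the lowercased model name. A quick usage sketch (the model names here are illustrative):

settings = ControlAdapterDefaultSettings.from_model_name("control_v11p_sd15_canny")
assert settings.preprocessor == "canny_image_processor"

# No recognized substring means no default preprocessor.
assert ControlAdapterDefaultSettings.from_model_name("mystery-model").preprocessor is None

Note that the table relies on dict insertion order so that more specific keys like "lineart anime" are tried before "lineart".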
diff --git a/invokeai/backend/model_manager/configs/external_api.py b/invokeai/backend/model_manager/configs/external_api.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/invokeai/backend/model_manager/configs/factory.py b/invokeai/backend/model_manager/configs/factory.py
new file mode 100644
index 00000000000..03ab40ca5a8
--- /dev/null
+++ b/invokeai/backend/model_manager/configs/factory.py
@@ -0,0 +1,499 @@
+import logging
+from pathlib import Path
+from typing import (
+    Union,
+)
+
+from pydantic import Discriminator, TypeAdapter, ValidationError
+from typing_extensions import Annotated, Any
+
+from invokeai.app.services.config.config_default import get_config
+from invokeai.app.util.misc import uuid_string
+from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS
+from invokeai.backend.model_manager.configs.base import Config_Base
+from invokeai.backend.model_manager.configs.clip_embed import CLIPEmbed_Diffusers_G_Config, CLIPEmbed_Diffusers_L_Config
+from invokeai.backend.model_manager.configs.clip_vision import CLIPVision_Diffusers_Config
+from invokeai.backend.model_manager.configs.controlnet import (
+    ControlAdapterDefaultSettings,
+    ControlNet_Checkpoint_FLUX_Config,
+    ControlNet_Checkpoint_SD1_Config,
+    ControlNet_Checkpoint_SD2_Config,
+    ControlNet_Checkpoint_SDXL_Config,
+    ControlNet_Diffusers_FLUX_Config,
+    ControlNet_Diffusers_SD1_Config,
+    ControlNet_Diffusers_SD2_Config,
+    ControlNet_Diffusers_SDXL_Config,
+)
+from invokeai.backend.model_manager.configs.flux_redux import FLUXRedux_Checkpoint_Config
+from invokeai.backend.model_manager.configs.identification_utils import NotAMatchError
+from invokeai.backend.model_manager.configs.ip_adapter import (
+    IPAdapter_Checkpoint_FLUX_Config,
+    IPAdapter_Checkpoint_SD1_Config,
+    IPAdapter_Checkpoint_SD2_Config,
+    IPAdapter_Checkpoint_SDXL_Config,
+    IPAdapter_InvokeAI_SD1_Config,
+    IPAdapter_InvokeAI_SD2_Config,
+    IPAdapter_InvokeAI_SDXL_Config,
+)
+from invokeai.backend.model_manager.configs.llava_onevision import LlavaOnevision_Diffusers_Config
+from invokeai.backend.model_manager.configs.lora import (
+    ControlLoRA_LyCORIS_FLUX_Config,
+    LoRA_Diffusers_FLUX_Config,
+    LoRA_Diffusers_SD1_Config,
+    LoRA_Diffusers_SD2_Config,
+    LoRA_Diffusers_SDXL_Config,
+    LoRA_LyCORIS_FLUX_Config,
+    LoRA_LyCORIS_SD1_Config,
+    LoRA_LyCORIS_SD2_Config,
+    LoRA_LyCORIS_SDXL_Config,
+    LoRA_OMI_FLUX_Config,
+    LoRA_OMI_SDXL_Config,
+    LoraModelDefaultSettings,
+)
+from invokeai.backend.model_manager.configs.main import (
+    Main_BnBNF4_FLUX_Config,
+    Main_Checkpoint_FLUX_Config,
+    Main_Checkpoint_SD1_Config,
+    Main_Checkpoint_SD2_Config,
+    Main_Checkpoint_SDXL_Config,
+    Main_Checkpoint_SDXLRefiner_Config,
+    Main_Diffusers_CogView4_Config,
+    Main_Diffusers_SD1_Config,
+    Main_Diffusers_SD2_Config,
+    Main_Diffusers_SD3_Config,
+    Main_Diffusers_SDXL_Config,
+    Main_Diffusers_SDXLRefiner_Config,
+    Main_ExternalAPI_ChatGPT4o_Config,
+    Main_ExternalAPI_FluxKontext_Config,
+    Main_ExternalAPI_Gemini2_5_Config,
+    Main_ExternalAPI_Imagen3_Config,
+    Main_ExternalAPI_Imagen4_Config,
+    Main_GGUF_FLUX_Config,
+    MainModelDefaultSettings,
+    Video_ExternalAPI_Runway_Config,
+    Video_ExternalAPI_Veo3_Config,
+)
+from invokeai.backend.model_manager.configs.siglip import SigLIP_Diffusers_Config
+from invokeai.backend.model_manager.configs.spandrel import Spandrel_Checkpoint_Config
+from invokeai.backend.model_manager.configs.t2i_adapter import (
+    T2IAdapter_Diffusers_SD1_Config,
+    T2IAdapter_Diffusers_SDXL_Config,
+)
+from invokeai.backend.model_manager.configs.t5_encoder import T5Encoder_BnBLLMint8_Config, T5Encoder_T5Encoder_Config
+from invokeai.backend.model_manager.configs.textual_inversion import (
+    TI_File_SD1_Config,
+    TI_File_SD2_Config,
+    TI_File_SDXL_Config,
+    TI_Folder_SD1_Config,
+    TI_Folder_SD2_Config,
+    TI_Folder_SDXL_Config,
+)
+from invokeai.backend.model_manager.configs.unknown import Unknown_Config
+from invokeai.backend.model_manager.configs.vae import (
+    VAE_Checkpoint_FLUX_Config,
+    VAE_Checkpoint_SD1_Config,
+    VAE_Checkpoint_SD2_Config,
+    VAE_Checkpoint_SDXL_Config,
+    VAE_Diffusers_SD1_Config,
+    VAE_Diffusers_SDXL_Config,
+)
+from invokeai.backend.model_manager.model_on_disk import ModelOnDisk
+from invokeai.backend.model_manager.taxonomy import (
+    BaseModelType,
+    ModelFormat,
+    ModelSourceType,
+    ModelType,
+    variant_type_adapter,
+)
+
+logger = logging.getLogger(__name__)
+app_config = get_config()
+
+# Known model file extensions for sanity checking
+_MODEL_EXTENSIONS = {
+    ".safetensors",
+    ".ckpt",
+    ".pt",
+    ".pth",
+    ".bin",
+    ".gguf",
+    ".onnx",
+}
+
+# Known config file names for diffusers/transformers models
+_CONFIG_FILES = {
+    "model_index.json",
+    "config.json",
+}
+
+# Maximum number of files in a directory to be considered a model
+_MAX_FILES_IN_MODEL_DIR = 50
+
+# Maximum depth to search for model files in directories
+_MAX_SEARCH_DEPTH = 2
+
+
+# The types are listed explicitly because IDEs/LSPs can't identify the correct types
+# when AnyModelConfig is constructed dynamically from Config_Base.CONFIG_CLASSES
+AnyModelConfig = Annotated[
+    Union[
+        # Main (Pipeline) - diffusers format
+        Annotated[Main_Diffusers_SD1_Config, Main_Diffusers_SD1_Config.get_tag()],
+        Annotated[Main_Diffusers_SD2_Config, Main_Diffusers_SD2_Config.get_tag()],
+        Annotated[Main_Diffusers_SDXL_Config, Main_Diffusers_SDXL_Config.get_tag()],
+        Annotated[Main_Diffusers_SDXLRefiner_Config, Main_Diffusers_SDXLRefiner_Config.get_tag()],
+        Annotated[Main_Diffusers_SD3_Config, Main_Diffusers_SD3_Config.get_tag()],
+        Annotated[Main_Diffusers_CogView4_Config, Main_Diffusers_CogView4_Config.get_tag()],
+        # Main (Pipeline) - checkpoint format
+        Annotated[Main_Checkpoint_SD1_Config, Main_Checkpoint_SD1_Config.get_tag()],
+        Annotated[Main_Checkpoint_SD2_Config, Main_Checkpoint_SD2_Config.get_tag()],
+        Annotated[Main_Checkpoint_SDXL_Config, Main_Checkpoint_SDXL_Config.get_tag()],
+        Annotated[Main_Checkpoint_SDXLRefiner_Config, Main_Checkpoint_SDXLRefiner_Config.get_tag()],
+        Annotated[Main_Checkpoint_FLUX_Config, Main_Checkpoint_FLUX_Config.get_tag()],
+        # Main (Pipeline) - quantized formats
+        Annotated[Main_BnBNF4_FLUX_Config, Main_BnBNF4_FLUX_Config.get_tag()],
+        Annotated[Main_GGUF_FLUX_Config, Main_GGUF_FLUX_Config.get_tag()],
+        # VAE - checkpoint format
+        Annotated[VAE_Checkpoint_SD1_Config, VAE_Checkpoint_SD1_Config.get_tag()],
+        Annotated[VAE_Checkpoint_SD2_Config, VAE_Checkpoint_SD2_Config.get_tag()],
+        Annotated[VAE_Checkpoint_SDXL_Config, VAE_Checkpoint_SDXL_Config.get_tag()],
+        Annotated[VAE_Checkpoint_FLUX_Config, VAE_Checkpoint_FLUX_Config.get_tag()],
+        # VAE - diffusers format
+        Annotated[VAE_Diffusers_SD1_Config, VAE_Diffusers_SD1_Config.get_tag()],
+        Annotated[VAE_Diffusers_SDXL_Config, VAE_Diffusers_SDXL_Config.get_tag()],
+        # ControlNet - checkpoint format
+        Annotated[ControlNet_Checkpoint_SD1_Config, ControlNet_Checkpoint_SD1_Config.get_tag()],
+        Annotated[ControlNet_Checkpoint_SD2_Config, ControlNet_Checkpoint_SD2_Config.get_tag()],
+        Annotated[ControlNet_Checkpoint_SDXL_Config, ControlNet_Checkpoint_SDXL_Config.get_tag()],
+        Annotated[ControlNet_Checkpoint_FLUX_Config, ControlNet_Checkpoint_FLUX_Config.get_tag()],
+        # ControlNet - diffusers format
+        Annotated[ControlNet_Diffusers_SD1_Config, ControlNet_Diffusers_SD1_Config.get_tag()],
+        Annotated[ControlNet_Diffusers_SD2_Config, ControlNet_Diffusers_SD2_Config.get_tag()],
+        Annotated[ControlNet_Diffusers_SDXL_Config, ControlNet_Diffusers_SDXL_Config.get_tag()],
+        Annotated[ControlNet_Diffusers_FLUX_Config, ControlNet_Diffusers_FLUX_Config.get_tag()],
+        # LoRA - LyCORIS format
+        Annotated[LoRA_LyCORIS_SD1_Config, LoRA_LyCORIS_SD1_Config.get_tag()],
+        Annotated[LoRA_LyCORIS_SD2_Config, LoRA_LyCORIS_SD2_Config.get_tag()],
+        Annotated[LoRA_LyCORIS_SDXL_Config, LoRA_LyCORIS_SDXL_Config.get_tag()],
+        Annotated[LoRA_LyCORIS_FLUX_Config, LoRA_LyCORIS_FLUX_Config.get_tag()],
+        # LoRA - OMI format
+        Annotated[LoRA_OMI_SDXL_Config, LoRA_OMI_SDXL_Config.get_tag()],
+        Annotated[LoRA_OMI_FLUX_Config, LoRA_OMI_FLUX_Config.get_tag()],
+        # LoRA - diffusers format
+        Annotated[LoRA_Diffusers_SD1_Config, LoRA_Diffusers_SD1_Config.get_tag()],
+        Annotated[LoRA_Diffusers_SD2_Config, LoRA_Diffusers_SD2_Config.get_tag()],
+        Annotated[LoRA_Diffusers_SDXL_Config, LoRA_Diffusers_SDXL_Config.get_tag()],
+        Annotated[LoRA_Diffusers_FLUX_Config, LoRA_Diffusers_FLUX_Config.get_tag()],
+        # ControlLoRA - diffusers format
+        Annotated[ControlLoRA_LyCORIS_FLUX_Config, ControlLoRA_LyCORIS_FLUX_Config.get_tag()],
+        # T5 Encoder - all formats
+        Annotated[T5Encoder_T5Encoder_Config, T5Encoder_T5Encoder_Config.get_tag()],
+        Annotated[T5Encoder_BnBLLMint8_Config, T5Encoder_BnBLLMint8_Config.get_tag()],
+        # TI - file format
+        Annotated[TI_File_SD1_Config, TI_File_SD1_Config.get_tag()],
+        Annotated[TI_File_SD2_Config, TI_File_SD2_Config.get_tag()],
+        Annotated[TI_File_SDXL_Config, TI_File_SDXL_Config.get_tag()],
+        # TI - folder format
+        Annotated[TI_Folder_SD1_Config, TI_Folder_SD1_Config.get_tag()],
+        Annotated[TI_Folder_SD2_Config, TI_Folder_SD2_Config.get_tag()],
+        Annotated[TI_Folder_SDXL_Config, TI_Folder_SDXL_Config.get_tag()],
+        # IP Adapter - InvokeAI format
+        Annotated[IPAdapter_InvokeAI_SD1_Config, IPAdapter_InvokeAI_SD1_Config.get_tag()],
+        Annotated[IPAdapter_InvokeAI_SD2_Config, IPAdapter_InvokeAI_SD2_Config.get_tag()],
+        Annotated[IPAdapter_InvokeAI_SDXL_Config, IPAdapter_InvokeAI_SDXL_Config.get_tag()],
+        # IP Adapter - checkpoint format
+        Annotated[IPAdapter_Checkpoint_SD1_Config, IPAdapter_Checkpoint_SD1_Config.get_tag()],
+        Annotated[IPAdapter_Checkpoint_SD2_Config, IPAdapter_Checkpoint_SD2_Config.get_tag()],
+        Annotated[IPAdapter_Checkpoint_SDXL_Config, IPAdapter_Checkpoint_SDXL_Config.get_tag()],
+        Annotated[IPAdapter_Checkpoint_FLUX_Config, IPAdapter_Checkpoint_FLUX_Config.get_tag()],
+        # T2I Adapter - diffusers format
+        Annotated[T2IAdapter_Diffusers_SD1_Config, T2IAdapter_Diffusers_SD1_Config.get_tag()],
+        Annotated[T2IAdapter_Diffusers_SDXL_Config, T2IAdapter_Diffusers_SDXL_Config.get_tag()],
+        # Misc models
+        Annotated[Spandrel_Checkpoint_Config, Spandrel_Checkpoint_Config.get_tag()],
+        Annotated[CLIPEmbed_Diffusers_G_Config, CLIPEmbed_Diffusers_G_Config.get_tag()],
+        Annotated[CLIPEmbed_Diffusers_L_Config, CLIPEmbed_Diffusers_L_Config.get_tag()],
+        Annotated[CLIPVision_Diffusers_Config, CLIPVision_Diffusers_Config.get_tag()],
+        Annotated[SigLIP_Diffusers_Config, SigLIP_Diffusers_Config.get_tag()],
+        Annotated[FLUXRedux_Checkpoint_Config, FLUXRedux_Checkpoint_Config.get_tag()],
+        Annotated[LlavaOnevision_Diffusers_Config, LlavaOnevision_Diffusers_Config.get_tag()],
+        # Main - external API
+        Annotated[Main_ExternalAPI_ChatGPT4o_Config, Main_ExternalAPI_ChatGPT4o_Config.get_tag()],
+        Annotated[Main_ExternalAPI_Gemini2_5_Config, Main_ExternalAPI_Gemini2_5_Config.get_tag()],
+        Annotated[Main_ExternalAPI_Imagen3_Config, Main_ExternalAPI_Imagen3_Config.get_tag()],
+        Annotated[Main_ExternalAPI_Imagen4_Config, Main_ExternalAPI_Imagen4_Config.get_tag()],
+        Annotated[Main_ExternalAPI_FluxKontext_Config, Main_ExternalAPI_FluxKontext_Config.get_tag()],
+        # Video - external API
+        Annotated[Video_ExternalAPI_Veo3_Config, Video_ExternalAPI_Veo3_Config.get_tag()],
+        Annotated[Video_ExternalAPI_Runway_Config, Video_ExternalAPI_Runway_Config.get_tag()],
+        # Unknown model (fallback)
+        Annotated[Unknown_Config, Unknown_Config.get_tag()],
+    ],
+    Discriminator(Config_Base.get_model_discriminator_value),
+]
+
+AnyModelConfigValidator = TypeAdapter[AnyModelConfig](AnyModelConfig)
+"""Pydantic TypeAdapter for the AnyModelConfig union, used for parsing and validation.
+
+If you need to parse/validate a dict or JSON into an AnyModelConfig, you should probably use
+ModelConfigFactory.from_dict or ModelConfigFactory.from_json instead as they may implement
+additional logic in the future.
+"""
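A quick sketch of what the discriminated union buys: the tag computed by Config_Base.get_model_discriminator_value routes a raw dict straight to the concrete class. The field values below are illustrative and assume the usual taxonomy enum strings ("controlnet", "diffusers", "sd-1", "path"); the hash and path are placeholders:

config = AnyModelConfigValidator.validate_python(
    {
        "type": "controlnet",
        "format": "diffusers",
        "base": "sd-1",
        "key": "example-key",
        "hash": "blake3:0000...",  # placeholder
        "path": "models/sd-1/controlnet/example",
        "file_size": 1234,
        "name": "example-controlnet",
        "source": "models/sd-1/controlnet/example",
        "source_type": "path",
    }
)
assert isinstance(config, ControlNet_Diffusers_SD1_Config)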
+
+
+class ModelConfigFactory:
+    @staticmethod
+    def from_dict(fields: dict[str, Any]) -> AnyModelConfig:
+        """Return the appropriate config object from raw dict values."""
+        model = AnyModelConfigValidator.validate_python(fields)
+        return model
+
+    @staticmethod
+    def from_json(json: str | bytes | bytearray) -> AnyModelConfig:
+        """Return the appropriate config object from json."""
+        model = AnyModelConfigValidator.validate_json(json)
+        return model
+
+    @staticmethod
+    def build_common_fields(
+        mod: ModelOnDisk,
+        override_fields: dict[str, Any] | None = None,
+    ) -> dict[str, Any]:
+        """Builds the common fields for all model configs.
+
+        Args:
+            mod: The model on disk to extract fields from.
+            override_fields: An optional dictionary of fields to override. These fields will take precedence over the
+                values extracted from the model on disk.
+
+        - Casts string fields to their Enum types.
+        - Does not validate the fields against the model config schema.
+        """
+
+        _overrides: dict[str, Any] = override_fields or {}
+        fields: dict[str, Any] = {}
+
+        if "type" in _overrides:
+            fields["type"] = ModelType(_overrides["type"])
+
+        if "format" in _overrides:
+            fields["format"] = ModelFormat(_overrides["format"])
+
+        if "base" in _overrides:
+            fields["base"] = BaseModelType(_overrides["base"])
+
+        if "source_type" in _overrides:
+            fields["source_type"] = ModelSourceType(_overrides["source_type"])
+
+        if "variant" in _overrides:
+            fields["variant"] = variant_type_adapter.validate_strings(_overrides["variant"])
+
+        fields["path"] = mod.path.as_posix()
+        fields["source"] = _overrides.get("source") or fields["path"]
+        fields["source_type"] = _overrides.get("source_type") or ModelSourceType.Path
+        fields["name"] = _overrides.get("name") or mod.name
+        fields["hash"] = _overrides.get("hash") or mod.hash()
+        fields["key"] = _overrides.get("key") or uuid_string()
+        fields["description"] = _overrides.get("description")
+        fields["file_size"] = _overrides.get("file_size") or mod.size()
+
+        return fields
+
+    @staticmethod
+    def _validate_path_looks_like_model(path: Path) -> None:
+        """Perform basic sanity checks to ensure a path looks like a model.
+
+        This prevents wasting time trying to identify obviously non-model paths like
+        home directories or downloads folders. Raises RuntimeError if the path doesn't
+        pass basic checks.
+
+        Args:
+            path: The path to validate
+
+        Raises:
+            RuntimeError: If the path doesn't look like a model
+        """
+        if path.is_file():
+            # For files, just check the extension
+            if path.suffix.lower() not in _MODEL_EXTENSIONS:
+                raise RuntimeError(
+                    f"File extension {path.suffix} is not a recognized model format. "
+                    f"Expected one of: {', '.join(sorted(_MODEL_EXTENSIONS))}"
+                )
+        else:
+            # For directories, do a quick file count check with early exit
+            total_files = 0
+            for item in path.rglob("*"):
+                if item.is_file():
+                    total_files += 1
+                    if total_files > _MAX_FILES_IN_MODEL_DIR:
+                        raise RuntimeError(
+                            f"Directory contains more than {_MAX_FILES_IN_MODEL_DIR} files. "
+                            "This looks like a general-purpose directory rather than a model. "
+                            "Please provide a path to a specific model file or model directory."
+                        )
+
+            # Check if it has config files at root (diffusers/transformers marker)
+            has_root_config = any((path / config).exists() for config in _CONFIG_FILES)
+
+            if has_root_config:
+                # Has a config file, looks like a valid model directory
+                return
+
+            # Otherwise, search for model files within depth limit
+            def find_model_files(current_path: Path, depth: int) -> bool:
+                if depth > _MAX_SEARCH_DEPTH:
+                    return False
+                try:
+                    for item in current_path.iterdir():
+                        if item.is_file() and item.suffix.lower() in _MODEL_EXTENSIONS:
+                            return True
+                        elif item.is_dir() and find_model_files(item, depth + 1):
+                            return True
+                except PermissionError:
+                    pass
+                return False
+
+            if not find_model_files(path, 0):
+                raise RuntimeError(
+                    f"No model files or config files found in directory {path}. "
+                    f"Expected to find model files with extensions: {', '.join(sorted(_MODEL_EXTENSIONS))} "
+                    f"or config files: {', '.join(sorted(_CONFIG_FILES))}"
+                )
+
+    @staticmethod
+    def from_model_on_disk(
+        mod: str | Path | ModelOnDisk,
+        override_fields: dict[str, Any] | None = None,
+        hash_algo: HASHING_ALGORITHMS = "blake3_single",
+    ) -> AnyModelConfig:
+        """
+        Returns the best matching ModelConfig instance from a model's file/folder path.
+ Raises RuntimeError if no valid configuration is found. + Created to replace (and deprecate) ModelProbe.probe. + """ + if isinstance(mod, Path | str): + mod = ModelOnDisk(Path(mod), hash_algo) + + # Perform basic sanity checks before attempting any config matching + # This rejects obviously non-model paths early, saving time + ModelConfigFactory._validate_path_looks_like_model(mod.path) + + # We will always need these fields to build any model config. + fields = ModelConfigFactory.build_common_fields(mod, override_fields) + + # Store results as a mapping of config class name to either an instance of that class or an exception + # that was raised when trying to build it. + results: dict[str, AnyModelConfig | Exception] = {} + + # Try to build an instance of each model config class that uses the classify API. + # Each class will either return an instance of itself or raise NotAMatch if it doesn't match. + # Other exceptions may be raised if something unexpected happens during matching or building. + for config_class in Config_Base.CONFIG_CLASSES: + class_name = config_class.__name__ + try: + instance = config_class.from_model_on_disk(mod, fields) + # Technically, from_model_on_disk returns a Config_Base, but in practice it will always be a member of + # the AnyModelConfig union. + results[class_name] = instance # type: ignore + except NotAMatchError as e: + results[class_name] = e + logger.debug(f"No match for {config_class.__name__} on model {mod.name}") + except ValidationError as e: + # This means the model matched, but we couldn't create the pydantic model instance for the config. + # Maybe invalid overrides were provided? + results[class_name] = e + logger.warning(f"Schema validation error for {config_class.__name__} on model {mod.name}: {e}") + except Exception as e: + results[class_name] = e + logger.debug(f"Unexpected exception while matching {mod.name} to {config_class.__name__}: {e}") + + matches = [r for r in results.values() if isinstance(r, Config_Base)] + + if not matches: + # No matches at all - not even Unknown_Config, which is intended to match anything as a last-resort + # fallback. This should be very rare; raise so the caller knows identification failed entirely. + msg = f"No model config matched for model {mod.path}" + logger.error(msg) + raise RuntimeError(msg) + + # It is possible that we have multiple matches. We need to prioritize them. + # + # Known cases where multiple matches can occur: + # - SD main models can look like a LoRA when they have merged in LoRA weights. Prefer the main model. + # - SD main models in diffusers format can look like a CLIP Embed; they have a text_encoder folder with + # a config.json file. Prefer the main model. + # + # Given the above cases, we can prioritize the matches by type. If we find more cases, we may need a more + # sophisticated approach. + # + # Unknown models should always be the last resort fallback. + def sort_key(m: AnyModelConfig) -> int: + match m.type: + case ModelType.Main: + return 0 + case ModelType.LoRA: + return 1 + case ModelType.CLIPEmbed: + return 2 + case ModelType.Unknown: + # Unknown should always be tried last as a fallback + return 999 + case _: + return 3 + + matches.sort(key=sort_key) + + if len(matches) > 1: + logger.warning( + f"Multiple model config classes matched for model {mod.path}: {[type(m).__name__ for m in matches]}."
+ ) + + instance = matches[0] + if isinstance(instance, Unknown_Config): + logger.warning(f"Unable to identify model {mod.path}, falling back to Unknown_Config") + else: + logger.info(f"Model {mod.path} classified as {type(instance).__name__}") + + # Now do any post-processing needed for specific model types/bases/etc. + match instance.type: + case ModelType.Main: + match instance.base: + case BaseModelType.StableDiffusion1: + instance.default_settings = MainModelDefaultSettings(width=512, height=512) + case BaseModelType.StableDiffusion2: + instance.default_settings = MainModelDefaultSettings(width=768, height=768) + case BaseModelType.StableDiffusionXL: + instance.default_settings = MainModelDefaultSettings(width=1024, height=1024) + case _: + pass + case ModelType.ControlNet | ModelType.T2IAdapter | ModelType.ControlLoRa: + instance.default_settings = ControlAdapterDefaultSettings.from_model_name(instance.name) + case ModelType.LoRA: + instance.default_settings = LoraModelDefaultSettings() + case _: + pass + + return instance + + +MODEL_NAME_TO_PREPROCESSOR = { + "canny": "canny_image_processor", + "mlsd": "mlsd_image_processor", + "depth": "depth_anything_image_processor", + "bae": "normalbae_image_processor", + "normal": "normalbae_image_processor", + "sketch": "pidi_image_processor", + "scribble": "lineart_image_processor", + "lineart anime": "lineart_anime_image_processor", + "lineart_anime": "lineart_anime_image_processor", + "lineart": "lineart_image_processor", + "soft": "hed_image_processor", + "softedge": "hed_image_processor", + "hed": "hed_image_processor", + "shuffle": "content_shuffle_image_processor", + "pose": "dw_openpose_image_processor", + "mediapipe": "mediapipe_face_processor", + "pidi": "pidi_image_processor", + "zoe": "zoe_depth_image_processor", + "color": "color_map_image_processor", +} diff --git a/invokeai/backend/model_manager/configs/flux_redux.py b/invokeai/backend/model_manager/configs/flux_redux.py new file mode 100644 index 00000000000..6eb76116fba --- /dev/null +++ b/invokeai/backend/model_manager/configs/flux_redux.py @@ -0,0 +1,40 @@ +from typing import ( + Literal, + Self, +) + +from pydantic import Field +from typing_extensions import Any + +from invokeai.backend.flux.redux.flux_redux_state_dict_utils import is_state_dict_likely_flux_redux +from invokeai.backend.model_manager.configs.base import Config_Base +from invokeai.backend.model_manager.configs.identification_utils import ( + NotAMatchError, + raise_for_override_fields, + raise_if_not_file, +) +from invokeai.backend.model_manager.model_on_disk import ModelOnDisk +from invokeai.backend.model_manager.taxonomy import ( + BaseModelType, + ModelFormat, + ModelType, +) + + +class FLUXRedux_Checkpoint_Config(Config_Base): + """Model config for FLUX Tools Redux model.""" + + type: Literal[ModelType.FluxRedux] = Field(default=ModelType.FluxRedux) + format: Literal[ModelFormat.Checkpoint] = Field(default=ModelFormat.Checkpoint) + base: Literal[BaseModelType.Flux] = Field(default=BaseModelType.Flux) + + @classmethod + def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self: + raise_if_not_file(mod) + + raise_for_override_fields(cls, override_fields) + + if not is_state_dict_likely_flux_redux(mod.load_state_dict()): + raise NotAMatchError("model does not match FLUX Tools Redux heuristics") + + return cls(**override_fields) diff --git a/invokeai/backend/model_manager/configs/identification_utils.py b/invokeai/backend/model_manager/configs/identification_utils.py new file mode 
100644 index 00000000000..ce7d2c792de --- /dev/null +++ b/invokeai/backend/model_manager/configs/identification_utils.py @@ -0,0 +1,206 @@ +import json +from functools import cache +from pathlib import Path + +from pydantic import BaseModel, ValidationError +from pydantic_core import CoreSchema, SchemaValidator +from typing_extensions import Any + +from invokeai.backend.model_manager.model_on_disk import ModelOnDisk + + +class NotAMatchError(Exception): + """Exception for when a model does not match a config class. + + Args: + reason: The reason why the model did not match. + """ + + def __init__(self, reason: str): + super().__init__(reason) + + +def get_config_dict_or_raise(config_path: Path | set[Path]) -> dict[str, Any]: + """Load the diffusers/transformers model config file and return it as a dictionary. The config file is expected + to be in JSON format. + + Args: + config_path: The path to the config file, or a set of paths to try. + + Returns: + The config file as a dictionary. + + Raises: + NotAMatch if the config file is missing or cannot be loaded. + """ + paths_to_check = config_path if isinstance(config_path, set) else {config_path} + + problems: dict[Path, str] = {} + + for p in paths_to_check: + if not p.exists(): + problems[p] = "file does not exist" + continue + + try: + with open(p, "r") as file: + config = json.load(file) + + return config + except Exception as e: + problems[p] = str(e) + continue + + raise NotAMatchError(f"unable to load config file(s): {problems}") + + +def get_class_name_from_config_dict_or_raise(config: Path | set[Path] | dict[str, Any]) -> str: + """Load the diffusers/transformers model config file and return the class name. + + Args: + config: The path to the config file, a set of paths to try, or an already-loaded config dict. + + Returns: + The class name from the config file. + + Raises: + NotAMatch if the config file is missing or does not contain a valid class name. + """ + + if not isinstance(config, dict): + config = get_config_dict_or_raise(config) + + try: + if "_class_name" in config: + # This is a diffusers-style config + config_class_name = config["_class_name"] + elif "architectures" in config: + # This is a transformers-style config + config_class_name = config["architectures"][0] + else: + raise ValueError("missing _class_name or architectures field") + except Exception as e: + raise NotAMatchError(f"unable to determine class name from config file: {config}") from e + + if not isinstance(config_class_name, str): + raise NotAMatchError(f"_class_name or architectures field is not a string: {config_class_name}") + + return config_class_name + + +def raise_for_class_name(config: Path | set[Path] | dict[str, Any], class_name: str | set[str]) -> None: + """Get the class name from the config file and raise NotAMatch if it is not in the expected set. + + Args: + config: The path to the config file, a set of paths to try, or an already-loaded config dict. + class_name: The expected class name, or a set of expected class names. + + Raises: + NotAMatch if the class name is not in the expected set. + """ + + class_name = {class_name} if isinstance(class_name, str) else class_name + + actual_class_name = get_class_name_from_config_dict_or_raise(config) + if actual_class_name not in class_name: + raise NotAMatchError(f"invalid class name from config: {actual_class_name}") + + +def raise_for_override_fields(candidate_config_class: type[BaseModel], override_fields: dict[str, Any]) -> None: + """Check if the provided override fields are valid for the config class using pydantic.
+ + For example, if the candidate config class has a field "base" of type Literal[BaseModelType.StableDiffusion1], and + the override fields contain "base": BaseModelType.Flux, this function will raise NotAMatch. + + Internally, this function extracts the pydantic schema for each individual override field from the candidate config + class and validates the override value against that schema. Post-instantiation validators are not run. + + Args: + candidate_config_class: The config class that is being tested. + override_fields: The override fields provided by the user. + + Raises: + NotAMatch if any override field is invalid for the config class. + """ + for field_name, override_value in override_fields.items(): + if field_name not in candidate_config_class.model_fields: + raise NotAMatchError(f"unknown override field: {field_name}") + try: + PydanticFieldValidator.validate_field(candidate_config_class, field_name, override_value) + except ValidationError as e: + raise NotAMatchError(f"invalid override for field '{field_name}': {e}") from e + + +def raise_if_not_file(mod: ModelOnDisk) -> None: + """Raise NotAMatch if the model path is not a file.""" + if not mod.path.is_file(): + raise NotAMatchError("model path is not a file") + + +def raise_if_not_dir(mod: ModelOnDisk) -> None: + """Raise NotAMatch if the model path is not a directory.""" + if not mod.path.is_dir(): + raise NotAMatchError("model path is not a directory") + + +def state_dict_has_any_keys_exact(state_dict: dict[str | int, Any], keys: str | set[str]) -> bool: + """Returns true if the state dict has any of the specified keys.""" + _keys = {keys} if isinstance(keys, str) else keys + return any(key in state_dict for key in _keys) + + +def state_dict_has_any_keys_starting_with(state_dict: dict[str | int, Any], prefixes: str | set[str]) -> bool: + """Returns true if the state dict has any keys starting with any of the specified prefixes.""" + _prefixes = {prefixes} if isinstance(prefixes, str) else prefixes + return any(any(key.startswith(prefix) for prefix in _prefixes) for key in state_dict.keys() if isinstance(key, str)) + + +def state_dict_has_any_keys_ending_with(state_dict: dict[str | int, Any], suffixes: str | set[str]) -> bool: + """Returns true if the state dict has any keys ending with any of the specified suffixes.""" + _suffixes = {suffixes} if isinstance(suffixes, str) else suffixes + return any(any(key.endswith(suffix) for suffix in _suffixes) for key in state_dict.keys() if isinstance(key, str)) + + +def common_config_paths(path: Path) -> set[Path]: + """Returns common config file paths for models stored in directories.""" + return {path / "config.json", path / "model_index.json"} + + +class PydanticFieldValidator: + """Utility class for validating individual fields of a Pydantic model without instantiating the whole model. + + See: https://github.com/pydantic/pydantic/discussions/7367#discussioncomment-14213144 + """ + + @staticmethod + def find_field_schema(model: type[BaseModel], field_name: str) -> CoreSchema: + """Find the Pydantic core schema for a specific field in a model.""" + schema: CoreSchema = model.__pydantic_core_schema__.copy() + # we shallow copied, be careful not to mutate the original schema! 
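+ # The core schema may wrap the field map in several nested "schema" layers; walk down until the "fields" mapping is found.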
+ + assert schema["type"] in ["definitions", "model"] + + # find the field schema + field_schema = schema["schema"] # type: ignore + while "fields" not in field_schema: + field_schema = field_schema["schema"] # type: ignore + + field_schema = field_schema["fields"][field_name]["schema"] # type: ignore + + # if the original schema is a definition schema, replace the model schema with the field schema + if schema["type"] == "definitions": + schema["schema"] = field_schema + return schema + else: + return field_schema + + @cache + @staticmethod + def get_validator(model: type[BaseModel], field_name: str) -> SchemaValidator: + """Get a SchemaValidator for a specific field in a model.""" + return SchemaValidator(PydanticFieldValidator.find_field_schema(model, field_name)) + + @staticmethod + def validate_field(model: type[BaseModel], field_name: str, value: Any) -> Any: + """Validate a value for a specific field in a model.""" + return PydanticFieldValidator.get_validator(model, field_name).validate_python(value) diff --git a/invokeai/backend/model_manager/configs/ip_adapter.py b/invokeai/backend/model_manager/configs/ip_adapter.py new file mode 100644 index 00000000000..ba27f176201 --- /dev/null +++ b/invokeai/backend/model_manager/configs/ip_adapter.py @@ -0,0 +1,180 @@ +from abc import ABC +from typing import ( + Literal, + Self, +) + +from pydantic import BaseModel, Field +from typing_extensions import Any + +from invokeai.backend.flux.ip_adapter.state_dict_utils import is_state_dict_xlabs_ip_adapter +from invokeai.backend.model_manager.configs.base import Config_Base +from invokeai.backend.model_manager.configs.identification_utils import ( + NotAMatchError, + raise_for_override_fields, + raise_if_not_dir, + raise_if_not_file, + state_dict_has_any_keys_starting_with, +) +from invokeai.backend.model_manager.model_on_disk import ModelOnDisk +from invokeai.backend.model_manager.taxonomy import ( + BaseModelType, + ModelFormat, + ModelType, +) + + +class IPAdapter_Config_Base(ABC, BaseModel): + type: Literal[ModelType.IPAdapter] = Field(default=ModelType.IPAdapter) + + +class IPAdapter_InvokeAI_Config_Base(IPAdapter_Config_Base): + """Model config for IP Adapter models in InvokeAI format.""" + + format: Literal[ModelFormat.InvokeAI] = Field(default=ModelFormat.InvokeAI) + + # TODO(ryand): Should we deprecate this field? From what I can tell, it hasn't been probed correctly for a long + # time. Need to go through the history to make sure I'm understanding this fully.
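+ # The InvokeAI format ships an image_encoder.txt metadata file alongside the weights identifying the paired + # image encoder (see _validate_has_image_encoder_metadata_file below).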
+ image_encoder_model_id: str = Field() + + @classmethod + def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self: + raise_if_not_dir(mod) + + raise_for_override_fields(cls, override_fields) + + cls._validate_has_weights_file(mod) + + cls._validate_has_image_encoder_metadata_file(mod) + + cls._validate_base(mod) + + return cls(**override_fields) + + @classmethod + def _validate_base(cls, mod: ModelOnDisk) -> None: + """Raise `NotAMatch` if the model base does not match this config class.""" + expected_base = cls.model_fields["base"].default + recognized_base = cls._get_base_or_raise(mod) + if expected_base is not recognized_base: + raise NotAMatchError(f"base is {recognized_base}, not {expected_base}") + + @classmethod + def _validate_has_weights_file(cls, mod: ModelOnDisk) -> None: + weights_file = mod.path / "ip_adapter.bin" + if not weights_file.exists(): + raise NotAMatchError("missing ip_adapter.bin weights file") + + @classmethod + def _validate_has_image_encoder_metadata_file(cls, mod: ModelOnDisk) -> None: + image_encoder_metadata_file = mod.path / "image_encoder.txt" + if not image_encoder_metadata_file.exists(): + raise NotAMatchError("missing image_encoder.txt metadata file") + + @classmethod + def _get_base_or_raise(cls, mod: ModelOnDisk) -> BaseModelType: + state_dict = mod.load_state_dict() + + try: + cross_attention_dim = state_dict["ip_adapter"]["1.to_k_ip.weight"].shape[-1] + except Exception as e: + raise NotAMatchError(f"unable to determine cross attention dimension: {e}") from e + + match cross_attention_dim: + case 768: + return BaseModelType.StableDiffusion1 + case 1024: + return BaseModelType.StableDiffusion2 + case 2048: + return BaseModelType.StableDiffusionXL + case _: + raise NotAMatchError(f"unrecognized cross attention dimension {cross_attention_dim}") + + +class IPAdapter_InvokeAI_SD1_Config(IPAdapter_InvokeAI_Config_Base, Config_Base): + base: Literal[BaseModelType.StableDiffusion1] = Field(default=BaseModelType.StableDiffusion1) + + +class IPAdapter_InvokeAI_SD2_Config(IPAdapter_InvokeAI_Config_Base, Config_Base): + base: Literal[BaseModelType.StableDiffusion2] = Field(default=BaseModelType.StableDiffusion2) + + +class IPAdapter_InvokeAI_SDXL_Config(IPAdapter_InvokeAI_Config_Base, Config_Base): + base: Literal[BaseModelType.StableDiffusionXL] = Field(default=BaseModelType.StableDiffusionXL) + + +class IPAdapter_Checkpoint_Config_Base(IPAdapter_Config_Base): + """Model config for IP Adapter checkpoint format models.""" + + format: Literal[ModelFormat.Checkpoint] = Field(default=ModelFormat.Checkpoint) + + @classmethod + def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self: + raise_if_not_file(mod) + + raise_for_override_fields(cls, override_fields) + + cls._validate_looks_like_ip_adapter(mod) + + cls._validate_base(mod) + + return cls(**override_fields) + + @classmethod + def _validate_base(cls, mod: ModelOnDisk) -> None: + """Raise `NotAMatch` if the model base does not match this config class.""" + expected_base = cls.model_fields["base"].default + recognized_base = cls._get_base_or_raise(mod) + if expected_base is not recognized_base: + raise NotAMatchError(f"base is {recognized_base}, not {expected_base}") + + @classmethod + def _validate_looks_like_ip_adapter(cls, mod: ModelOnDisk) -> None: + if not state_dict_has_any_keys_starting_with( + mod.load_state_dict(), + { + "image_proj.", + "ip_adapter.", + # XLabs FLUX IP-Adapter models have keys starting with "ip_adapter_proj_model.".
+ "ip_adapter_proj_model.", + }, + ): + raise NotAMatchError("model does not match Checkpoint IP Adapter heuristics") + + @classmethod + def _get_base_or_raise(cls, mod: ModelOnDisk) -> BaseModelType: + state_dict = mod.load_state_dict() + + if is_state_dict_xlabs_ip_adapter(state_dict): + return BaseModelType.Flux + + try: + cross_attention_dim = state_dict["ip_adapter.1.to_k_ip.weight"].shape[-1] + except Exception as e: + raise NotAMatchError(f"unable to determine cross attention dimension: {e}") from e + + match cross_attention_dim: + case 768: + return BaseModelType.StableDiffusion1 + case 1024: + return BaseModelType.StableDiffusion2 + case 2048: + return BaseModelType.StableDiffusionXL + case _: + raise NotAMatchError(f"unrecognized cross attention dimension {cross_attention_dim}") + + +class IPAdapter_Checkpoint_SD1_Config(IPAdapter_Checkpoint_Config_Base, Config_Base): + base: Literal[BaseModelType.StableDiffusion1] = Field(default=BaseModelType.StableDiffusion1) + + +class IPAdapter_Checkpoint_SD2_Config(IPAdapter_Checkpoint_Config_Base, Config_Base): + base: Literal[BaseModelType.StableDiffusion2] = Field(default=BaseModelType.StableDiffusion2) + + +class IPAdapter_Checkpoint_SDXL_Config(IPAdapter_Checkpoint_Config_Base, Config_Base): + base: Literal[BaseModelType.StableDiffusionXL] = Field(default=BaseModelType.StableDiffusionXL) + + +class IPAdapter_Checkpoint_FLUX_Config(IPAdapter_Checkpoint_Config_Base, Config_Base): + base: Literal[BaseModelType.Flux] = Field(default=BaseModelType.Flux) diff --git a/invokeai/backend/model_manager/configs/llava_onevision.py b/invokeai/backend/model_manager/configs/llava_onevision.py new file mode 100644 index 00000000000..c6ceb43ca9d --- /dev/null +++ b/invokeai/backend/model_manager/configs/llava_onevision.py @@ -0,0 +1,42 @@ +from typing import ( + Literal, + Self, +) + +from pydantic import Field +from typing_extensions import Any + +from invokeai.backend.model_manager.configs.base import Config_Base, Diffusers_Config_Base +from invokeai.backend.model_manager.configs.identification_utils import ( + common_config_paths, + raise_for_class_name, + raise_for_override_fields, + raise_if_not_dir, +) +from invokeai.backend.model_manager.model_on_disk import ModelOnDisk +from invokeai.backend.model_manager.taxonomy import ( + BaseModelType, + ModelType, +) + + +class LlavaOnevision_Diffusers_Config(Diffusers_Config_Base, Config_Base): + """Model config for Llava Onevision models.""" + + type: Literal[ModelType.LlavaOnevision] = Field(default=ModelType.LlavaOnevision) + base: Literal[BaseModelType.Any] = Field(default=BaseModelType.Any) + + @classmethod + def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self: + raise_if_not_dir(mod) + + raise_for_override_fields(cls, override_fields) + + raise_for_class_name( + common_config_paths(mod.path), + { + "LlavaOnevisionForConditionalGeneration", + }, + ) + + return cls(**override_fields) diff --git a/invokeai/backend/model_manager/configs/lora.py b/invokeai/backend/model_manager/configs/lora.py new file mode 100644 index 00000000000..24e10c035a9 --- /dev/null +++ b/invokeai/backend/model_manager/configs/lora.py @@ -0,0 +1,323 @@ +from abc import ABC +from pathlib import Path +from typing import ( + Any, + Literal, + Self, +) + +from pydantic import BaseModel, ConfigDict, Field +from typing_extensions import Any + +from invokeai.backend.model_manager.configs.base import ( + Config_Base, +) +from invokeai.backend.model_manager.configs.controlnet import 
ControlAdapterDefaultSettings +from invokeai.backend.model_manager.configs.identification_utils import ( + NotAMatchError, + raise_for_override_fields, + raise_if_not_dir, + raise_if_not_file, + state_dict_has_any_keys_ending_with, + state_dict_has_any_keys_starting_with, +) +from invokeai.backend.model_manager.model_on_disk import ModelOnDisk +from invokeai.backend.model_manager.omi import flux_dev_1_lora, stable_diffusion_xl_1_lora +from invokeai.backend.model_manager.taxonomy import ( + BaseModelType, + FluxLoRAFormat, + ModelFormat, + ModelType, +) +from invokeai.backend.model_manager.util.model_util import lora_token_vector_length +from invokeai.backend.patches.lora_conversions.flux_control_lora_utils import is_state_dict_likely_flux_control + + +class LoraModelDefaultSettings(BaseModel): + weight: float | None = Field(default=None, ge=-1, le=2, description="Default weight for this model") + model_config = ConfigDict(extra="forbid") + + +class LoRA_Config_Base(ABC, BaseModel): + """Base class for LoRA models.""" + + type: Literal[ModelType.LoRA] = Field(default=ModelType.LoRA) + trigger_phrases: set[str] | None = Field( + default=None, + description="Set of trigger phrases for this model", + ) + default_settings: LoraModelDefaultSettings | None = Field( + default=None, + description="Default settings for this model", + ) + + +def _get_flux_lora_format(mod: ModelOnDisk) -> FluxLoRAFormat | None: + # TODO(psyche): Moving this import to the function to avoid circular imports. Refactor later. + from invokeai.backend.patches.lora_conversions.formats import flux_format_from_state_dict + + state_dict = mod.load_state_dict(mod.path) + value = flux_format_from_state_dict(state_dict, mod.metadata()) + return value + + +class LoRA_OMI_Config_Base(LoRA_Config_Base): + format: Literal[ModelFormat.OMI] = Field(default=ModelFormat.OMI) + + @classmethod + def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self: + raise_if_not_file(mod) + + raise_for_override_fields(cls, override_fields) + + cls._validate_looks_like_omi_lora(mod) + + cls._validate_base(mod) + + return cls(**override_fields) + + @classmethod + def _validate_base(cls, mod: ModelOnDisk) -> None: + """Raise `NotAMatch` if the model base does not match this config class.""" + expected_base = cls.model_fields["base"].default + recognized_base = cls._get_base_or_raise(mod) + if expected_base is not recognized_base: + raise NotAMatchError(f"base is {recognized_base}, not {expected_base}") + + @classmethod + def _validate_looks_like_omi_lora(cls, mod: ModelOnDisk) -> None: + """Raise `NotAMatch` if the model metadata does not look like an OMI LoRA.""" + flux_format = _get_flux_lora_format(mod) + if flux_format in [FluxLoRAFormat.Control, FluxLoRAFormat.Diffusers]: + raise NotAMatchError("model looks like ControlLoRA or Diffusers LoRA") + + metadata = mod.metadata() + + metadata_looks_like_omi_lora = ( + bool(metadata.get("modelspec.sai_model_spec")) + and metadata.get("ot_branch") == "omi_format" + and metadata.get("modelspec.architecture", "").split("/")[1].lower() == "lora" + ) + + if not metadata_looks_like_omi_lora: + raise NotAMatchError("metadata does not look like OMI LoRA") + + @classmethod + def _get_base_or_raise(cls, mod: ModelOnDisk) -> Literal[BaseModelType.Flux, BaseModelType.StableDiffusionXL]: + metadata = mod.metadata() + architecture = metadata["modelspec.architecture"] + + if architecture == stable_diffusion_xl_1_lora: + return BaseModelType.StableDiffusionXL + elif architecture == 
flux_dev_1_lora: + return BaseModelType.Flux + else: + raise NotAMatchError(f"unrecognized/unsupported architecture for OMI LoRA: {architecture}") + + +class LoRA_OMI_SDXL_Config(LoRA_OMI_Config_Base, Config_Base): + base: Literal[BaseModelType.StableDiffusionXL] = Field(default=BaseModelType.StableDiffusionXL) + + +class LoRA_OMI_FLUX_Config(LoRA_OMI_Config_Base, Config_Base): + base: Literal[BaseModelType.Flux] = Field(default=BaseModelType.Flux) + + +class LoRA_LyCORIS_Config_Base(LoRA_Config_Base): + """Model config for LoRA/LyCORIS models.""" + + type: Literal[ModelType.LoRA] = Field(default=ModelType.LoRA) + format: Literal[ModelFormat.LyCORIS] = Field(default=ModelFormat.LyCORIS) + + @classmethod + def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self: + raise_if_not_file(mod) + + raise_for_override_fields(cls, override_fields) + + cls._validate_looks_like_lora(mod) + + cls._validate_base(mod) + + return cls(**override_fields) + + @classmethod + def _validate_base(cls, mod: ModelOnDisk) -> None: + """Raise `NotAMatch` if the model base does not match this config class.""" + expected_base = cls.model_fields["base"].default + recognized_base = cls._get_base_or_raise(mod) + if expected_base is not recognized_base: + raise NotAMatchError(f"base is {recognized_base}, not {expected_base}") + + @classmethod + def _validate_looks_like_lora(cls, mod: ModelOnDisk) -> None: + # First rule out ControlLoRA and Diffusers LoRA + flux_format = _get_flux_lora_format(mod) + if flux_format in [FluxLoRAFormat.Control, FluxLoRAFormat.Diffusers]: + raise NotAMatchError("model looks like ControlLoRA or Diffusers LoRA") + + # Note: Existence of these key prefixes/suffixes does not guarantee that this is a LoRA. + # Some main models have these keys, likely due to the creator merging in a LoRA.
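+ # When both a main model config and a LoRA config match, ModelConfigFactory resolves the tie in favor of the + # main model (see its sort_key).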
+ has_key_with_lora_prefix = state_dict_has_any_keys_starting_with( + mod.load_state_dict(), + { + "lora_te_", + "lora_unet_", + "lora_te1_", + "lora_te2_", + "lora_transformer_", + }, + ) + + has_key_with_lora_suffix = state_dict_has_any_keys_ending_with( + mod.load_state_dict(), + { + "to_k_lora.up.weight", + "to_q_lora.down.weight", + "lora_A.weight", + "lora_B.weight", + }, + ) + + if not has_key_with_lora_prefix and not has_key_with_lora_suffix: + raise NotAMatchError("model does not match LyCORIS LoRA heuristics") + + @classmethod + def _get_base_or_raise(cls, mod: ModelOnDisk) -> BaseModelType: + if _get_flux_lora_format(mod): + return BaseModelType.Flux + + state_dict = mod.load_state_dict() + # If we've gotten here, we assume that the model is a Stable Diffusion model + token_vector_length = lora_token_vector_length(state_dict) + if token_vector_length == 768: + return BaseModelType.StableDiffusion1 + elif token_vector_length == 1024: + return BaseModelType.StableDiffusion2 + elif token_vector_length == 1280: + return BaseModelType.StableDiffusionXL # recognizes format at https://civitai.com/models/224641 + elif token_vector_length == 2048: + return BaseModelType.StableDiffusionXL + else: + raise NotAMatchError(f"unrecognized token vector length {token_vector_length}") + + +class LoRA_LyCORIS_SD1_Config(LoRA_LyCORIS_Config_Base, Config_Base): + base: Literal[BaseModelType.StableDiffusion1] = Field(default=BaseModelType.StableDiffusion1) + + +class LoRA_LyCORIS_SD2_Config(LoRA_LyCORIS_Config_Base, Config_Base): + base: Literal[BaseModelType.StableDiffusion2] = Field(default=BaseModelType.StableDiffusion2) + + +class LoRA_LyCORIS_SDXL_Config(LoRA_LyCORIS_Config_Base, Config_Base): + base: Literal[BaseModelType.StableDiffusionXL] = Field(default=BaseModelType.StableDiffusionXL) + + +class LoRA_LyCORIS_FLUX_Config(LoRA_LyCORIS_Config_Base, Config_Base): + base: Literal[BaseModelType.Flux] = Field(default=BaseModelType.Flux) + + +class ControlAdapter_Config_Base(ABC, BaseModel): + default_settings: ControlAdapterDefaultSettings | None = Field(None) + + +class ControlLoRA_LyCORIS_FLUX_Config(ControlAdapter_Config_Base, Config_Base): + """Model config for Control LoRA models.""" + + base: Literal[BaseModelType.Flux] = Field(default=BaseModelType.Flux) + type: Literal[ModelType.ControlLoRa] = Field(default=ModelType.ControlLoRa) + format: Literal[ModelFormat.LyCORIS] = Field(default=ModelFormat.LyCORIS) + + trigger_phrases: set[str] | None = Field(None) + + @classmethod + def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self: + raise_if_not_file(mod) + + raise_for_override_fields(cls, override_fields) + + cls._validate_looks_like_control_lora(mod) + + return cls(**override_fields) + + @classmethod + def _validate_looks_like_control_lora(cls, mod: ModelOnDisk) -> None: + state_dict = mod.load_state_dict() + + if not is_state_dict_likely_flux_control(state_dict): + raise NotAMatchError("model state dict does not look like a Flux Control LoRA") + + +class LoRA_Diffusers_Config_Base(LoRA_Config_Base): + """Model config for LoRA/Diffusers models.""" + + # TODO(psyche): Needs base handling. For FLUX, the Diffusers format does not indicate a folder model; it indicates + # the weights format. FLUX Diffusers LoRAs are single files. 
+ + format: Literal[ModelFormat.Diffusers] = Field(default=ModelFormat.Diffusers) + + @classmethod + def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self: + raise_if_not_dir(mod) + + raise_for_override_fields(cls, override_fields) + + cls._validate_base(mod) + + return cls(**override_fields) + + @classmethod + def _validate_base(cls, mod: ModelOnDisk) -> None: + """Raise `NotAMatch` if the model base does not match this config class.""" + expected_base = cls.model_fields["base"].default + recognized_base = cls._get_base_or_raise(mod) + if expected_base is not recognized_base: + raise NotAMatchError(f"base is {recognized_base}, not {expected_base}") + + @classmethod + def _get_base_or_raise(cls, mod: ModelOnDisk) -> BaseModelType: + if _get_flux_lora_format(mod): + return BaseModelType.Flux + + # If we've gotten here, we assume that the LoRA is a Stable Diffusion LoRA + path_to_weight_file = cls._get_weight_file_or_raise(mod) + state_dict = mod.load_state_dict(path_to_weight_file) + token_vector_length = lora_token_vector_length(state_dict) + + match token_vector_length: + case 768: + return BaseModelType.StableDiffusion1 + case 1024: + return BaseModelType.StableDiffusion2 + case 1280: + return BaseModelType.StableDiffusionXL # recognizes format at https://civitai.com/models/224641 + case 2048: + return BaseModelType.StableDiffusionXL + case _: + raise NotAMatchError(f"unrecognized token vector length {token_vector_length}") + + @classmethod + def _get_weight_file_or_raise(cls, mod: ModelOnDisk) -> Path: + suffixes = ["bin", "safetensors"] + weight_files = [mod.path / f"pytorch_lora_weights.{sfx}" for sfx in suffixes] + for wf in weight_files: + if wf.exists(): + return wf + raise NotAMatchError("missing pytorch_lora_weights.bin or pytorch_lora_weights.safetensors") + + +class LoRA_Diffusers_SD1_Config(LoRA_Diffusers_Config_Base, Config_Base): + base: Literal[BaseModelType.StableDiffusion1] = Field(default=BaseModelType.StableDiffusion1) + + +class LoRA_Diffusers_SD2_Config(LoRA_Diffusers_Config_Base, Config_Base): + base: Literal[BaseModelType.StableDiffusion2] = Field(default=BaseModelType.StableDiffusion2) + + +class LoRA_Diffusers_SDXL_Config(LoRA_Diffusers_Config_Base, Config_Base): + base: Literal[BaseModelType.StableDiffusionXL] = Field(default=BaseModelType.StableDiffusionXL) + + +class LoRA_Diffusers_FLUX_Config(LoRA_Diffusers_Config_Base, Config_Base): + base: Literal[BaseModelType.Flux] = Field(default=BaseModelType.Flux) diff --git a/invokeai/backend/model_manager/configs/main.py b/invokeai/backend/model_manager/configs/main.py new file mode 100644 index 00000000000..26f6b5b60e0 --- /dev/null +++ b/invokeai/backend/model_manager/configs/main.py @@ -0,0 +1,692 @@ +from abc import ABC +from typing import Any, Literal, Self + +from pydantic import BaseModel, ConfigDict, Field + +from invokeai.backend.model_manager.configs.base import ( + Checkpoint_Config_Base, + Config_Base, + Diffusers_Config_Base, + SubmodelDefinition, +) +from invokeai.backend.model_manager.configs.clip_embed import get_clip_variant_type_from_config +from invokeai.backend.model_manager.configs.identification_utils import ( + NotAMatchError, + common_config_paths, + get_config_dict_or_raise, + raise_for_class_name, + raise_for_override_fields, + raise_if_not_dir, + raise_if_not_file, + state_dict_has_any_keys_exact, +) +from invokeai.backend.model_manager.model_on_disk import ModelOnDisk +from invokeai.backend.model_manager.taxonomy import ( + BaseModelType, + FluxVariantType, 
+ ModelFormat, + ModelType, + ModelVariantType, + SchedulerPredictionType, + SubModelType, +) +from invokeai.backend.quantization.gguf.ggml_tensor import GGMLTensor +from invokeai.backend.stable_diffusion.schedulers.schedulers import SCHEDULER_NAME_VALUES + +DEFAULTS_PRECISION = Literal["fp16", "fp32"] + + +class MainModelDefaultSettings(BaseModel): + vae: str | None = Field(default=None, description="Default VAE for this model (model key)") + vae_precision: DEFAULTS_PRECISION | None = Field(default=None, description="Default VAE precision for this model") + scheduler: SCHEDULER_NAME_VALUES | None = Field(default=None, description="Default scheduler for this model") + steps: int | None = Field(default=None, gt=0, description="Default number of steps for this model") + cfg_scale: float | None = Field(default=None, ge=1, description="Default CFG Scale for this model") + cfg_rescale_multiplier: float | None = Field( + default=None, ge=0, lt=1, description="Default CFG Rescale Multiplier for this model" + ) + width: int | None = Field(default=None, multiple_of=8, ge=64, description="Default width for this model") + height: int | None = Field(default=None, multiple_of=8, ge=64, description="Default height for this model") + guidance: float | None = Field(default=None, ge=1, description="Default Guidance for this model") + + model_config = ConfigDict(extra="forbid") + + +class Main_Config_Base(ABC, BaseModel): + type: Literal[ModelType.Main] = Field(default=ModelType.Main) + trigger_phrases: set[str] | None = Field( + default=None, + description="Set of trigger phrases for this model", + ) + default_settings: MainModelDefaultSettings | None = Field( + default=None, + description="Default settings for this model", + ) + + +def _has_bnb_nf4_keys(state_dict: dict[str | int, Any]) -> bool: + bnb_nf4_keys = { + "double_blocks.0.img_attn.proj.weight.quant_state.bitsandbytes__nf4", + "model.diffusion_model.double_blocks.0.img_attn.proj.weight.quant_state.bitsandbytes__nf4", + } + return any(key in state_dict for key in bnb_nf4_keys) + + +def _has_ggml_tensors(state_dict: dict[str | int, Any]) -> bool: + return any(isinstance(v, GGMLTensor) for v in state_dict.values()) + + +def _has_main_keys(state_dict: dict[str | int, Any]) -> bool: + for key in state_dict.keys(): + if isinstance(key, int): + continue + elif key.startswith( + ( + "cond_stage_model.", + "first_stage_model.", + "model.diffusion_model.", + # Some FLUX checkpoint files contain transformer keys prefixed with "model.diffusion_model". + # This prefix is typically used to distinguish between multiple models bundled in a single file. + "model.diffusion_model.double_blocks.", + ) + ): + return True + elif key.startswith("double_blocks.") and "ip_adapter" not in key: + # FLUX models in the official BFL format contain keys with the "double_blocks." prefix, but we must be + # careful to avoid false positives on XLabs FLUX IP-Adapter models. 
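+ # XLabs adapter weights may also use the "double_blocks." prefix, hence the extra "ip_adapter" substring filter.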
+ return True + return False + + +class Main_Checkpoint_Config_Base(Checkpoint_Config_Base, Main_Config_Base): + """Model config for main checkpoint models.""" + + format: Literal[ModelFormat.Checkpoint] = Field(default=ModelFormat.Checkpoint) + + prediction_type: SchedulerPredictionType = Field() + variant: ModelVariantType = Field() + + @classmethod + def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self: + raise_if_not_file(mod) + + raise_for_override_fields(cls, override_fields) + + cls._validate_looks_like_main_model(mod) + + cls._validate_base(mod) + + prediction_type = override_fields.get("prediction_type") or cls._get_scheduler_prediction_type_or_raise(mod) + + variant = override_fields.get("variant") or cls._get_variant_or_raise(mod) + + return cls(**override_fields, prediction_type=prediction_type, variant=variant) + + @classmethod + def _validate_base(cls, mod: ModelOnDisk) -> None: + """Raise `NotAMatch` if the model base does not match this config class.""" + expected_base = cls.model_fields["base"].default + recognized_base = cls._get_base_or_raise(mod) + if expected_base is not recognized_base: + raise NotAMatchError(f"base is {recognized_base}, not {expected_base}") + + @classmethod + def _get_base_or_raise(cls, mod: ModelOnDisk) -> BaseModelType: + state_dict = mod.load_state_dict() + + key_name = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight" + if key_name in state_dict and state_dict[key_name].shape[-1] == 768: + return BaseModelType.StableDiffusion1 + if key_name in state_dict and state_dict[key_name].shape[-1] == 1024: + return BaseModelType.StableDiffusion2 + + key_name = "model.diffusion_model.input_blocks.4.1.transformer_blocks.0.attn2.to_k.weight" + if key_name in state_dict and state_dict[key_name].shape[-1] == 2048: + return BaseModelType.StableDiffusionXL + elif key_name in state_dict and state_dict[key_name].shape[-1] == 1280: + return BaseModelType.StableDiffusionXLRefiner + + raise NotAMatchError("unable to determine base type from state dict") + + @classmethod + def _get_scheduler_prediction_type_or_raise(cls, mod: ModelOnDisk) -> SchedulerPredictionType: + base = cls.model_fields["base"].default + + if base is BaseModelType.StableDiffusion2: + state_dict = mod.load_state_dict() + key_name = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight" + if key_name in state_dict and state_dict[key_name].shape[-1] == 1024: + if "global_step" in state_dict: + if state_dict["global_step"] == 220000: + return SchedulerPredictionType.Epsilon + elif state_dict["global_step"] == 110000: + return SchedulerPredictionType.VPrediction + return SchedulerPredictionType.VPrediction + else: + return SchedulerPredictionType.Epsilon + + @classmethod + def _get_variant_or_raise(cls, mod: ModelOnDisk) -> ModelVariantType: + base = cls.model_fields["base"].default + + state_dict = mod.load_state_dict() + key_name = "model.diffusion_model.input_blocks.0.0.weight" + + if key_name not in state_dict: + raise NotAMatchError("unable to determine model variant from state dict") + + in_channels = state_dict["model.diffusion_model.input_blocks.0.0.weight"].shape[1] + + match in_channels: + case 4: + return ModelVariantType.Normal + case 5: + # Only SD2 has a depth variant + assert base is BaseModelType.StableDiffusion2, f"unexpected unet in_channels 5 for base '{base}'" + return ModelVariantType.Depth + case 9: + return ModelVariantType.Inpaint + case _: + raise NotAMatchError(f"unrecognized unet 
in_channels {in_channels} for base '{base}'") + + @classmethod + def _validate_looks_like_main_model(cls, mod: ModelOnDisk) -> None: + has_main_model_keys = _has_main_keys(mod.load_state_dict()) + if not has_main_model_keys: + raise NotAMatchError("state dict does not look like a main model") + + +class Main_Checkpoint_SD1_Config(Main_Checkpoint_Config_Base, Config_Base): + base: Literal[BaseModelType.StableDiffusion1] = Field(default=BaseModelType.StableDiffusion1) + + +class Main_Checkpoint_SD2_Config(Main_Checkpoint_Config_Base, Config_Base): + base: Literal[BaseModelType.StableDiffusion2] = Field(default=BaseModelType.StableDiffusion2) + + +class Main_Checkpoint_SDXL_Config(Main_Checkpoint_Config_Base, Config_Base): + base: Literal[BaseModelType.StableDiffusionXL] = Field(default=BaseModelType.StableDiffusionXL) + + +class Main_Checkpoint_SDXLRefiner_Config(Main_Checkpoint_Config_Base, Config_Base): + base: Literal[BaseModelType.StableDiffusionXLRefiner] = Field(default=BaseModelType.StableDiffusionXLRefiner) + + +def _get_flux_variant(state_dict: dict[str | int, Any]) -> FluxVariantType | None: + # FLUX Model variant types are distinguished by input channels and the presence of certain keys. + + # Input channels are derived from the shape of either "img_in.weight" or "model.diffusion_model.img_in.weight". + # + # Known models that use the latter key: + # - https://civitai.com/models/885098?modelVersionId=990775 + # - https://civitai.com/models/1018060?modelVersionId=1596255 + # - https://civitai.com/models/978314/ultrareal-fine-tune?modelVersionId=1413133 + # + # Input channels for known FLUX models: + # - Unquantized Dev and Schnell have in_channels=64 + # - BNB-NF4 Dev and Schnell have in_channels=1 + # - FLUX Fill has in_channels=384 + # - Unsure of quantized FLUX Fill models + # - Unsure of GGUF-quantized models + + in_channels = None + for key in {"img_in.weight", "model.diffusion_model.img_in.weight"}: + if key in state_dict: + in_channels = state_dict[key].shape[1] + break + + if in_channels is None: + # TODO(psyche): Should we have a graceful fallback here? Previously we fell back to the "normal" variant, + # but this variant is no longer used for FLUX models. If we get here, but the model is definitely a FLUX + # model, we should figure out a good fallback value. + return None + + # Because FLUX Dev and Schnell models have the same in_channels, we need to check for the presence of + # certain keys to distinguish between them. + is_flux_dev = ( + "guidance_in.out_layer.weight" in state_dict + or "model.diffusion_model.guidance_in.out_layer.weight" in state_dict + ) + + if is_flux_dev and in_channels == 384: + return FluxVariantType.DevFill + elif is_flux_dev: + return FluxVariantType.Dev + else: + # Must be a Schnell model...? 
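+ # Schnell checkpoints lack the guidance_in keys checked above, so any non-Dev FLUX model lands here.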
+ return FluxVariantType.Schnell + + +class Main_Checkpoint_FLUX_Config(Checkpoint_Config_Base, Main_Config_Base, Config_Base): + """Model config for main FLUX checkpoint models.""" + + format: Literal[ModelFormat.Checkpoint] = Field(default=ModelFormat.Checkpoint) + base: Literal[BaseModelType.Flux] = Field(default=BaseModelType.Flux) + + variant: FluxVariantType = Field() + + @classmethod + def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self: + raise_if_not_file(mod) + + raise_for_override_fields(cls, override_fields) + + cls._validate_looks_like_main_model(mod) + + cls._validate_is_flux(mod) + + cls._validate_does_not_look_like_bnb_quantized(mod) + + cls._validate_does_not_look_like_gguf_quantized(mod) + + variant = override_fields.get("variant") or cls._get_variant_or_raise(mod) + + return cls(**override_fields, variant=variant) + + @classmethod + def _validate_is_flux(cls, mod: ModelOnDisk) -> None: + if not state_dict_has_any_keys_exact( + mod.load_state_dict(), + { + "double_blocks.0.img_attn.norm.key_norm.scale", + "model.diffusion_model.double_blocks.0.img_attn.norm.key_norm.scale", + }, + ): + raise NotAMatchError("state dict does not look like a FLUX checkpoint") + + @classmethod + def _get_variant_or_raise(cls, mod: ModelOnDisk) -> FluxVariantType: + # FLUX Model variant types are distinguished by input channels and the presence of certain keys. + state_dict = mod.load_state_dict() + variant = _get_flux_variant(state_dict) + + if variant is None: + # TODO(psyche): Should we have a graceful fallback here? Previously we fell back to the "normal" variant, + # but this variant is no longer used for FLUX models. If we get here, but the model is definitely a FLUX + # model, we should figure out a good fallback value.
+ raise NotAMatchError("unable to determine model variant from state dict") + + return variant + + @classmethod + def _validate_looks_like_main_model(cls, mod: ModelOnDisk) -> None: + has_main_model_keys = _has_main_keys(mod.load_state_dict()) + if not has_main_model_keys: + raise NotAMatchError("state dict does not look like a main model") + + @classmethod + def _validate_does_not_look_like_bnb_quantized(cls, mod: ModelOnDisk) -> None: + has_bnb_nf4_keys = _has_bnb_nf4_keys(mod.load_state_dict()) + if has_bnb_nf4_keys: + raise NotAMatchError("state dict looks like bnb quantized nf4") + + @classmethod + def _validate_does_not_look_like_gguf_quantized(cls, mod: ModelOnDisk): + has_ggml_tensors = _has_ggml_tensors(mod.load_state_dict()) + if has_ggml_tensors: + raise NotAMatchError("state dict looks like GGUF quantized") + + +class Main_BnBNF4_FLUX_Config(Checkpoint_Config_Base, Main_Config_Base, Config_Base): + """Model config for main checkpoint models.""" + + base: Literal[BaseModelType.Flux] = Field(default=BaseModelType.Flux) + format: Literal[ModelFormat.BnbQuantizednf4b] = Field(default=ModelFormat.BnbQuantizednf4b) + + variant: FluxVariantType = Field() + + @classmethod + def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self: + raise_if_not_file(mod) + + raise_for_override_fields(cls, override_fields) + + cls._validate_looks_like_main_model(mod) + + cls._validate_model_looks_like_bnb_quantized(mod) + + variant = override_fields.get("variant") or cls._get_variant_or_raise(mod) + + return cls(**override_fields, variant=variant) + + @classmethod + def _get_variant_or_raise(cls, mod: ModelOnDisk) -> FluxVariantType: + # FLUX Model variant types are distinguished by input channels and the presence of certain keys. + state_dict = mod.load_state_dict() + variant = _get_flux_variant(state_dict) + + if variant is None: + # TODO(psyche): Should we have a graceful fallback here? Previously we fell back to the "normal" variant, + # but this variant is no longer used for FLUX models. If we get here, but the model is definitely a FLUX + # model, we should figure out a good fallback value. 
+ raise NotAMatchError("unable to determine model variant from state dict") + + return variant + + @classmethod + def _validate_looks_like_main_model(cls, mod: ModelOnDisk) -> None: + has_main_model_keys = _has_main_keys(mod.load_state_dict()) + if not has_main_model_keys: + raise NotAMatchError("state dict does not look like a main model") + + @classmethod + def _validate_model_looks_like_bnb_quantized(cls, mod: ModelOnDisk) -> None: + has_bnb_nf4_keys = _has_bnb_nf4_keys(mod.load_state_dict()) + if not has_bnb_nf4_keys: + raise NotAMatchError("state dict does not look like bnb quantized nf4") + + +class Main_GGUF_FLUX_Config(Checkpoint_Config_Base, Main_Config_Base, Config_Base): + """Model config for main checkpoint models.""" + + base: Literal[BaseModelType.Flux] = Field(default=BaseModelType.Flux) + format: Literal[ModelFormat.GGUFQuantized] = Field(default=ModelFormat.GGUFQuantized) + + variant: FluxVariantType = Field() + + @classmethod + def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self: + raise_if_not_file(mod) + + raise_for_override_fields(cls, override_fields) + + cls._validate_looks_like_main_model(mod) + + cls._validate_looks_like_gguf_quantized(mod) + + variant = override_fields.get("variant") or cls._get_variant_or_raise(mod) + + return cls(**override_fields, variant=variant) + + @classmethod + def _get_variant_or_raise(cls, mod: ModelOnDisk) -> FluxVariantType: + # FLUX Model variant types are distinguished by input channels and the presence of certain keys. + state_dict = mod.load_state_dict() + variant = _get_flux_variant(state_dict) + + if variant is None: + # TODO(psyche): Should we have a graceful fallback here? Previously we fell back to the "normal" variant, + # but this variant is no longer used for FLUX models. If we get here, but the model is definitely a FLUX + # model, we should figure out a good fallback value. + raise NotAMatchError("unable to determine model variant from state dict") + + return variant + + @classmethod + def _validate_looks_like_main_model(cls, mod: ModelOnDisk) -> None: + has_main_model_keys = _has_main_keys(mod.load_state_dict()) + if not has_main_model_keys: + raise NotAMatchError("state dict does not look like a main model") + + @classmethod + def _validate_looks_like_gguf_quantized(cls, mod: ModelOnDisk) -> None: + has_ggml_tensors = _has_ggml_tensors(mod.load_state_dict()) + if not has_ggml_tensors: + raise NotAMatchError("state dict does not look like GGUF quantized") + + +class Main_Diffusers_Config_Base(Diffusers_Config_Base, Main_Config_Base): + prediction_type: SchedulerPredictionType = Field() + variant: ModelVariantType = Field() + + @classmethod + def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self: + raise_if_not_dir(mod) + + raise_for_override_fields(cls, override_fields) + + raise_for_class_name( + common_config_paths(mod.path), + { + # SD 1.x and 2.x + "StableDiffusionPipeline", + "StableDiffusionInpaintPipeline", + # SDXL + "StableDiffusionXLPipeline", + "StableDiffusionXLInpaintPipeline", + # SDXL Refiner + "StableDiffusionXLImg2ImgPipeline", + # TODO(psyche): Do we actually support LCM models? I don't see using this class anywhere in the codebase. 
+ "LatentConsistencyModelPipeline", + }, + ) + + cls._validate_base(mod) + + variant = override_fields.get("variant") or cls._get_variant_or_raise(mod) + + prediction_type = override_fields.get("prediction_type") or cls._get_scheduler_prediction_type_or_raise(mod) + + repo_variant = override_fields.get("repo_variant") or cls._get_repo_variant_or_raise(mod) + + return cls( + **override_fields, + variant=variant, + prediction_type=prediction_type, + repo_variant=repo_variant, + ) + + @classmethod + def _validate_base(cls, mod: ModelOnDisk) -> None: + """Raise `NotAMatch` if the model base does not match this config class.""" + expected_base = cls.model_fields["base"].default + recognized_base = cls._get_base_or_raise(mod) + if expected_base is not recognized_base: + raise NotAMatchError(f"base is {recognized_base}, not {expected_base}") + + @classmethod + def _get_base_or_raise(cls, mod: ModelOnDisk) -> BaseModelType: + # Handle pipelines with a UNet (i.e SD 1.x, SD2.x, SDXL). + unet_conf = get_config_dict_or_raise(mod.path / "unet" / "config.json") + cross_attention_dim = unet_conf.get("cross_attention_dim") + match cross_attention_dim: + case 768: + return BaseModelType.StableDiffusion1 + case 1024: + return BaseModelType.StableDiffusion2 + case 1280: + return BaseModelType.StableDiffusionXLRefiner + case 2048: + return BaseModelType.StableDiffusionXL + case _: + raise NotAMatchError(f"unrecognized cross_attention_dim {cross_attention_dim}") + + @classmethod + def _get_scheduler_prediction_type_or_raise(cls, mod: ModelOnDisk) -> SchedulerPredictionType: + scheduler_conf = get_config_dict_or_raise(mod.path / "scheduler" / "scheduler_config.json") + + # TODO(psyche): Is epsilon the right default or should we raise if it's not present? + prediction_type = scheduler_conf.get("prediction_type", "epsilon") + + match prediction_type: + case "v_prediction": + return SchedulerPredictionType.VPrediction + case "epsilon": + return SchedulerPredictionType.Epsilon + case _: + raise NotAMatchError(f"unrecognized scheduler prediction_type {prediction_type}") + + @classmethod + def _get_variant_or_raise(cls, mod: ModelOnDisk) -> ModelVariantType: + base = cls.model_fields["base"].default + unet_config = get_config_dict_or_raise(mod.path / "unet" / "config.json") + in_channels = unet_config.get("in_channels") + + match in_channels: + case 4: + return ModelVariantType.Normal + case 5: + # Only SD2 has a depth variant + assert base is BaseModelType.StableDiffusion2, f"unexpected unet in_channels 5 for base '{base}'" + return ModelVariantType.Depth + case 9: + return ModelVariantType.Inpaint + case _: + raise NotAMatchError(f"unrecognized unet in_channels {in_channels} for base '{base}'") + + +class Main_Diffusers_SD1_Config(Main_Diffusers_Config_Base, Config_Base): + base: Literal[BaseModelType.StableDiffusion1] = Field(BaseModelType.StableDiffusion1) + + +class Main_Diffusers_SD2_Config(Main_Diffusers_Config_Base, Config_Base): + base: Literal[BaseModelType.StableDiffusion2] = Field(BaseModelType.StableDiffusion2) + + +class Main_Diffusers_SDXL_Config(Main_Diffusers_Config_Base, Config_Base): + base: Literal[BaseModelType.StableDiffusionXL] = Field(BaseModelType.StableDiffusionXL) + + +class Main_Diffusers_SDXLRefiner_Config(Main_Diffusers_Config_Base, Config_Base): + base: Literal[BaseModelType.StableDiffusionXLRefiner] = Field(BaseModelType.StableDiffusionXLRefiner) + + +class Main_Diffusers_SD3_Config(Diffusers_Config_Base, Main_Config_Base, Config_Base): + base: Literal[BaseModelType.StableDiffusion3] = 
Field(BaseModelType.StableDiffusion3) + submodels: dict[SubModelType, SubmodelDefinition] | None = Field( + description="Loadable submodels in this model", + default=None, + ) + + @classmethod + def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self: + raise_if_not_dir(mod) + + raise_for_override_fields(cls, override_fields) + + # This check implies the base type - no further validation needed. + raise_for_class_name( + common_config_paths(mod.path), + { + "StableDiffusion3Pipeline", + "SD3Transformer2DModel", + }, + ) + + submodels = override_fields.get("submodels") or cls._get_submodels_or_raise(mod) + + repo_variant = override_fields.get("repo_variant") or cls._get_repo_variant_or_raise(mod) + + return cls( + **override_fields, + submodels=submodels, + repo_variant=repo_variant, + ) + + @classmethod + def _get_submodels_or_raise(cls, mod: ModelOnDisk) -> dict[SubModelType, SubmodelDefinition]: + # Example: https://huggingface.co/stabilityai/stable-diffusion-3.5-medium/blob/main/model_index.json + config = get_config_dict_or_raise(common_config_paths(mod.path)) + + submodels: dict[SubModelType, SubmodelDefinition] = {} + + for key, value in config.items(): + # Anything that starts with an underscore is top-level metadata, not a submodel + if key.startswith("_") or not (isinstance(value, list) and len(value) == 2): + continue + # The key is something like "transformer" and is a submodel - it will be in a dir of the same name. + # The value is something like ["diffusers", "SD3Transformer2DModel"] + _library_name, class_name = value + + match class_name: + case "CLIPTextModelWithProjection": + model_type = ModelType.CLIPEmbed + path_or_prefix = (mod.path / key).resolve().as_posix() + + # We need to read the config to determine the variant of the CLIP model. + clip_embed_config = get_config_dict_or_raise( + { + mod.path / key / "config.json", + mod.path / key / "model_index.json", + } + ) + variant = get_clip_variant_type_from_config(clip_embed_config) + submodels[SubModelType(key)] = SubmodelDefinition( + path_or_prefix=path_or_prefix, + model_type=model_type, + variant=variant, + ) + case "SD3Transformer2DModel": + model_type = ModelType.Main + path_or_prefix = (mod.path / key).resolve().as_posix() + variant = None + submodels[SubModelType(key)] = SubmodelDefinition( + path_or_prefix=path_or_prefix, + model_type=model_type, + variant=variant, + ) + case _: + pass + + return submodels + + +class Main_Diffusers_CogView4_Config(Diffusers_Config_Base, Main_Config_Base, Config_Base): + base: Literal[BaseModelType.CogView4] = Field(BaseModelType.CogView4) + + @classmethod + def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self: + raise_if_not_dir(mod) + + raise_for_override_fields(cls, override_fields) + + # This check implies the base type - no further validation needed.
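+ # (CogView4 repos declare _class_name "CogView4Pipeline" in their model_index.json.)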
+        raise_for_class_name(
+            common_config_paths(mod.path),
+            {
+                "CogView4Pipeline",
+            },
+        )
+
+        repo_variant = override_fields.get("repo_variant") or cls._get_repo_variant_or_raise(mod)
+
+        return cls(
+            **override_fields,
+            repo_variant=repo_variant,
+        )
+
+
+class ExternalAPI_Config_Base(ABC, BaseModel):
+    """Model config for API-based models."""
+
+    format: Literal[ModelFormat.Api] = Field(default=ModelFormat.Api)
+
+    @classmethod
+    def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self:
+        raise NotAMatchError("External API models cannot be built from disk")
+
+
+class Main_ExternalAPI_ChatGPT4o_Config(ExternalAPI_Config_Base, Main_Config_Base, Config_Base):
+    base: Literal[BaseModelType.ChatGPT4o] = Field(default=BaseModelType.ChatGPT4o)
+
+
+class Main_ExternalAPI_Gemini2_5_Config(ExternalAPI_Config_Base, Main_Config_Base, Config_Base):
+    base: Literal[BaseModelType.Gemini2_5] = Field(default=BaseModelType.Gemini2_5)
+
+
+class Main_ExternalAPI_Imagen3_Config(ExternalAPI_Config_Base, Main_Config_Base, Config_Base):
+    base: Literal[BaseModelType.Imagen3] = Field(default=BaseModelType.Imagen3)
+
+
+class Main_ExternalAPI_Imagen4_Config(ExternalAPI_Config_Base, Main_Config_Base, Config_Base):
+    base: Literal[BaseModelType.Imagen4] = Field(default=BaseModelType.Imagen4)
+
+
+class Main_ExternalAPI_FluxKontext_Config(ExternalAPI_Config_Base, Main_Config_Base, Config_Base):
+    base: Literal[BaseModelType.FluxKontext] = Field(default=BaseModelType.FluxKontext)
+
+
+class Video_Config_Base(ABC, BaseModel):
+    type: Literal[ModelType.Video] = Field(default=ModelType.Video)
+    trigger_phrases: set[str] | None = Field(description="Set of trigger phrases for this model", default=None)
+    default_settings: MainModelDefaultSettings | None = Field(
+        description="Default settings for this model", default=None
+    )
+
+
+class Video_ExternalAPI_Veo3_Config(ExternalAPI_Config_Base, Video_Config_Base, Config_Base):
+    base: Literal[BaseModelType.Veo3] = Field(default=BaseModelType.Veo3)
+
+
+class Video_ExternalAPI_Runway_Config(ExternalAPI_Config_Base, Video_Config_Base, Config_Base):
+    base: Literal[BaseModelType.Runway] = Field(default=BaseModelType.Runway)
diff --git a/invokeai/backend/model_manager/configs/siglip.py b/invokeai/backend/model_manager/configs/siglip.py
new file mode 100644
index 00000000000..62ca9494e27
--- /dev/null
+++ b/invokeai/backend/model_manager/configs/siglip.py
@@ -0,0 +1,44 @@
+from typing import (
+    Literal,
+    Self,
+)
+
+from pydantic import Field
+from typing_extensions import Any
+
+from invokeai.backend.model_manager.configs.base import Config_Base, Diffusers_Config_Base
+from invokeai.backend.model_manager.configs.identification_utils import (
+    common_config_paths,
+    raise_for_class_name,
+    raise_for_override_fields,
+    raise_if_not_dir,
+)
+from invokeai.backend.model_manager.model_on_disk import ModelOnDisk
+from invokeai.backend.model_manager.taxonomy import (
+    BaseModelType,
+    ModelFormat,
+    ModelType,
+)
+
+
+class SigLIP_Diffusers_Config(Diffusers_Config_Base, Config_Base):
+    """Model config for SigLIP."""
+
+    type: Literal[ModelType.SigLIP] = Field(default=ModelType.SigLIP)
+    format: Literal[ModelFormat.Diffusers] = Field(default=ModelFormat.Diffusers)
+    base: Literal[BaseModelType.Any] = Field(default=BaseModelType.Any)
+
+    @classmethod
+    def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self:
+        raise_if_not_dir(mod)
+
+        raise_for_override_fields(cls, override_fields)
+
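+        # (`raise_for_class_name` is assumed to read `_class_name`, or the first entry of
+        # `architectures`, from the first config file it finds and to raise `NotAMatchError` unless it
+        # matches one of the given names - mirroring the class-name lookup in the legacy probe.)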
+        raise_for_class_name(
+            common_config_paths(mod.path),
+            {
+                "SiglipModel",
+            },
+        )
+
+        return cls(**override_fields)
diff --git a/invokeai/backend/model_manager/configs/spandrel.py b/invokeai/backend/model_manager/configs/spandrel.py
new file mode 100644
index 00000000000..8ca8ad5f603
--- /dev/null
+++ b/invokeai/backend/model_manager/configs/spandrel.py
@@ -0,0 +1,54 @@
+from typing import (
+    Literal,
+    Self,
+)
+
+from pydantic import Field
+from typing_extensions import Any
+
+from invokeai.backend.model_manager.configs.base import Config_Base
+from invokeai.backend.model_manager.configs.identification_utils import (
+    NotAMatchError,
+    raise_for_override_fields,
+    raise_if_not_file,
+)
+from invokeai.backend.model_manager.model_on_disk import ModelOnDisk
+from invokeai.backend.model_manager.taxonomy import (
+    BaseModelType,
+    ModelFormat,
+    ModelType,
+)
+from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel
+
+
+class Spandrel_Checkpoint_Config(Config_Base):
+    """Model config for Spandrel Image to Image models."""
+
+    base: Literal[BaseModelType.Any] = Field(default=BaseModelType.Any)
+    type: Literal[ModelType.SpandrelImageToImage] = Field(default=ModelType.SpandrelImageToImage)
+    format: Literal[ModelFormat.Checkpoint] = Field(default=ModelFormat.Checkpoint)
+
+    @classmethod
+    def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self:
+        raise_if_not_file(mod)
+
+        raise_for_override_fields(cls, override_fields)
+
+        cls._validate_spandrel_loads_model(mod)
+
+        return cls(**override_fields)
+
+    @classmethod
+    def _validate_spandrel_loads_model(cls, mod: ModelOnDisk) -> None:
+        try:
+            # It would be nice to avoid having to load the Spandrel model from disk here. A couple of options were
+            # explored to avoid this:
+            # 1. Call `SpandrelImageToImageModel.load_from_state_dict(ckpt)`, where `ckpt` is a state_dict on the meta
+            #    device. Unfortunately, some Spandrel models perform operations during initialization that are not
+            #    supported on meta tensors.
+            # 2. Spandrel has internal logic to determine a model's type from its state_dict before loading the model.
+            #    This logic is not exposed in spandrel's public API. We could copy the logic here, but then we have to
+            #    maintain it, and the risk of false positive detections is higher.
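+            # Note that spandrel raises `spandrel.UnsupportedModelError` for architectures it does not
+            # recognize (the legacy probe caught that exception explicitly); the broad `except Exception`
+            # below additionally treats any file that fails to load as not a match.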
+            SpandrelImageToImageModel.load_from_file(mod.path)
+        except Exception as e:
+            raise NotAMatchError("model does not match SpandrelImageToImage heuristics") from e
diff --git a/invokeai/backend/model_manager/configs/t2i_adapter.py b/invokeai/backend/model_manager/configs/t2i_adapter.py
new file mode 100644
index 00000000000..a1da40e9b4b
--- /dev/null
+++ b/invokeai/backend/model_manager/configs/t2i_adapter.py
@@ -0,0 +1,79 @@
+from typing import (
+    Literal,
+    Self,
+)
+
+from pydantic import Field
+from typing_extensions import Any
+
+from invokeai.backend.model_manager.configs.base import Config_Base, Diffusers_Config_Base
+from invokeai.backend.model_manager.configs.controlnet import ControlAdapterDefaultSettings
+from invokeai.backend.model_manager.configs.identification_utils import (
+    NotAMatchError,
+    common_config_paths,
+    get_config_dict_or_raise,
+    raise_for_class_name,
+    raise_for_override_fields,
+    raise_if_not_dir,
+)
+from invokeai.backend.model_manager.model_on_disk import ModelOnDisk
+from invokeai.backend.model_manager.taxonomy import (
+    BaseModelType,
+    ModelFormat,
+    ModelType,
+)
+
+
+class T2IAdapter_Diffusers_Config_Base(Diffusers_Config_Base):
+    """Model config for T2I-Adapter models."""
+
+    type: Literal[ModelType.T2IAdapter] = Field(default=ModelType.T2IAdapter)
+    format: Literal[ModelFormat.Diffusers] = Field(default=ModelFormat.Diffusers)
+    default_settings: ControlAdapterDefaultSettings | None = Field(None)
+
+    @classmethod
+    def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self:
+        raise_if_not_dir(mod)
+
+        raise_for_override_fields(cls, override_fields)
+
+        raise_for_class_name(
+            common_config_paths(mod.path),
+            {
+                "T2IAdapter",
+            },
+        )
+
+        cls._validate_base(mod)
+
+        return cls(**override_fields)
+
+    @classmethod
+    def _validate_base(cls, mod: ModelOnDisk) -> None:
+        """Raise `NotAMatchError` if the model base does not match this config class."""
+        expected_base = cls.model_fields["base"].default
+        recognized_base = cls._get_base_or_raise(mod)
+        if expected_base is not recognized_base:
+            raise NotAMatchError(f"base is {recognized_base}, not {expected_base}")
+
+    @classmethod
+    def _get_base_or_raise(cls, mod: ModelOnDisk) -> BaseModelType:
+        config_dict = get_config_dict_or_raise(common_config_paths(mod.path))
+
+        adapter_type = config_dict.get("adapter_type")
+
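+        # For reference, `adapter_type` is a top-level key in the adapter's config.json, e.g.
+        # {"adapter_type": "full_adapter_xl", ...} for the SDXL T2I-Adapters (illustrative sketch;
+        # only this key is inspected here).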
+        match adapter_type:
+            case "full_adapter_xl":
+                return BaseModelType.StableDiffusionXL
+            case "full_adapter" | "light_adapter":
+                return BaseModelType.StableDiffusion1
+            case _:
+                raise NotAMatchError(f"unrecognized adapter_type '{adapter_type}'")
+
+
+class T2IAdapter_Diffusers_SD1_Config(T2IAdapter_Diffusers_Config_Base, Config_Base):
+    base: Literal[BaseModelType.StableDiffusion1] = Field(default=BaseModelType.StableDiffusion1)
+
+
+class T2IAdapter_Diffusers_SDXL_Config(T2IAdapter_Diffusers_Config_Base, Config_Base):
+    base: Literal[BaseModelType.StableDiffusionXL] = Field(default=BaseModelType.StableDiffusionXL)
diff --git a/invokeai/backend/model_manager/configs/t5_encoder.py b/invokeai/backend/model_manager/configs/t5_encoder.py
new file mode 100644
index 00000000000..ed682e14304
--- /dev/null
+++ b/invokeai/backend/model_manager/configs/t5_encoder.py
@@ -0,0 +1,80 @@
+from typing import Any, Literal, Self
+
+from pydantic import Field
+
+from invokeai.backend.model_manager.configs.base import Config_Base
+from invokeai.backend.model_manager.configs.identification_utils import (
+    NotAMatchError,
+    raise_for_class_name,
+    raise_for_override_fields,
+    raise_if_not_dir,
+    state_dict_has_any_keys_ending_with,
+)
+from invokeai.backend.model_manager.model_on_disk import ModelOnDisk
+from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelFormat, ModelType
+
+
+class T5Encoder_T5Encoder_Config(Config_Base):
+    """Configuration for T5 Encoder models in a bespoke, diffusers-like format. The model weights are expected to be
+    in a folder called text_encoder_2 inside the model directory, alongside a config.json and a weights index named
+    model.safetensors.index.json."""
+
+    base: Literal[BaseModelType.Any] = Field(default=BaseModelType.Any)
+    type: Literal[ModelType.T5Encoder] = Field(default=ModelType.T5Encoder)
+    format: Literal[ModelFormat.T5Encoder] = Field(default=ModelFormat.T5Encoder)
+
+    @classmethod
+    def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self:
+        raise_if_not_dir(mod)
+
+        raise_for_override_fields(cls, override_fields)
+
+        expected_config_path = mod.path / "text_encoder_2" / "config.json"
+        expected_class_name = "T5EncoderModel"
+        raise_for_class_name(expected_config_path, expected_class_name)
+
+        cls.raise_if_doesnt_have_unquantized_config_file(mod)
+
+        return cls(**override_fields)
+
+    @classmethod
+    def raise_if_doesnt_have_unquantized_config_file(cls, mod: ModelOnDisk) -> None:
+        has_unquantized_config = (mod.path / "text_encoder_2" / "model.safetensors.index.json").exists()
+
+        if not has_unquantized_config:
+            raise NotAMatchError("missing text_encoder_2/model.safetensors.index.json")
+
+
+class T5Encoder_BnBLLMint8_Config(Config_Base):
+    """Configuration for T5 Encoder models quantized by bitsandbytes' LLM.int8."""
+
+    base: Literal[BaseModelType.Any] = Field(default=BaseModelType.Any)
+    type: Literal[ModelType.T5Encoder] = Field(default=ModelType.T5Encoder)
+    format: Literal[ModelFormat.BnbQuantizedLlmInt8b] = Field(default=ModelFormat.BnbQuantizedLlmInt8b)
+
+    @classmethod
+    def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self:
+        raise_if_not_dir(mod)
+
+        raise_for_override_fields(cls, override_fields)
+
+        expected_config_path = mod.path / "text_encoder_2" / "config.json"
+        expected_class_name = "T5EncoderModel"
+        raise_for_class_name(expected_config_path, expected_class_name)
+
+        cls.raise_if_filename_doesnt_look_like_bnb_quantized(mod)
+
+        cls.raise_if_state_dict_doesnt_look_like_bnb_quantized(mod)
+
+        return cls(**override_fields)
+
+    @classmethod
+    def raise_if_filename_doesnt_look_like_bnb_quantized(cls, mod: ModelOnDisk) -> None:
+        filename_looks_like_bnb = any(x for x in mod.weight_files() if "llm_int8" in x.as_posix())
+        if not filename_looks_like_bnb:
+            raise NotAMatchError("filename does not look like bnb quantized llm_int8")
+
+    @classmethod
+    def raise_if_state_dict_doesnt_look_like_bnb_quantized(cls, mod: ModelOnDisk) -> None:
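+        # (bitsandbytes' LLM.int8 serializes per-weight quantization statistics under keys ending in
+        # "SCB"; that suffix is the telltale used here, and is assumed stable across bnb versions.)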
+        has_scb_key_suffix = state_dict_has_any_keys_ending_with(mod.load_state_dict(), "SCB")
+        if not has_scb_key_suffix:
+            raise NotAMatchError("state dict does not look like bnb quantized llm_int8")
diff --git a/invokeai/backend/model_manager/configs/textual_inversion.py b/invokeai/backend/model_manager/configs/textual_inversion.py
new file mode 100644
index 00000000000..c827f5234d5
--- /dev/null
+++ b/invokeai/backend/model_manager/configs/textual_inversion.py
@@ -0,0 +1,156 @@
+from abc import ABC
+from pathlib import Path
+from typing import (
+    Literal,
+    Self,
+)
+
+import torch
+from pydantic import BaseModel, Field
+from typing_extensions import Any
+
+from invokeai.backend.model_manager.configs.base import Config_Base
+from invokeai.backend.model_manager.configs.identification_utils import (
+    NotAMatchError,
+    raise_for_override_fields,
+    raise_if_not_dir,
+    raise_if_not_file,
+)
+from invokeai.backend.model_manager.model_on_disk import ModelOnDisk
+from invokeai.backend.model_manager.taxonomy import (
+    BaseModelType,
+    ModelFormat,
+    ModelType,
+)
+
+
+class TI_Config_Base(ABC, BaseModel):
+    type: Literal[ModelType.TextualInversion] = Field(default=ModelType.TextualInversion)
+
+    @classmethod
+    def _validate_base(cls, mod: ModelOnDisk, path: Path | None = None) -> None:
+        expected_base = cls.model_fields["base"].default
+        recognized_base = cls._get_base_or_raise(mod, path)
+        if expected_base is not recognized_base:
+            raise NotAMatchError(f"base is {recognized_base}, not {expected_base}")
+
+    @classmethod
+    def _file_looks_like_embedding(cls, mod: ModelOnDisk, path: Path | None = None) -> bool:
+        try:
+            p = path or mod.path
+
+            if not p.exists():
+                return False
+
+            if p.is_dir():
+                return False
+
+            # Heuristic: diffusers-style embeddings are a "learned_embeds" file with a weights extension
+            if p.name in [f"learned_embeds.{s}" for s in ("bin", "safetensors")]:
+                return True
+
+            state_dict = mod.load_state_dict(p)
+
+            # Heuristic: textual inversion embeddings have these keys
+            if any(key in {"string_to_param", "emb_params", "clip_g"} for key in state_dict.keys()):
+                return True
+
+            # Heuristic: small state dict with all tensor values
+            if len(state_dict) < 10 and all(isinstance(v, torch.Tensor) for v in state_dict.values()):
+                return True
+
+            return False
+        except Exception:
+            return False
+
+    @classmethod
+    def _get_base_or_raise(cls, mod: ModelOnDisk, path: Path | None = None) -> BaseModelType:
+        p = path or mod.path
+
+        try:
+            state_dict = mod.load_state_dict(p)
+        except Exception as e:
+            raise NotAMatchError(f"unable to load state dict from {p}: {e}") from e
+
+        try:
+            if "string_to_token" in state_dict:
+                token_dim = list(state_dict["string_to_param"].values())[0].shape[-1]
+            elif "emb_params" in state_dict:
+                token_dim = state_dict["emb_params"].shape[-1]
+            elif "clip_g" in state_dict:
+                token_dim = state_dict["clip_g"].shape[-1]
+            else:
+                token_dim = list(state_dict.values())[0].shape[0]
+        except Exception as e:
+            raise NotAMatchError(f"unable to determine token dimension from state dict in {p}: {e}") from e
+
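+        # For reference: 768 is the embedding width of CLIP ViT-L/14 (the SD 1.x text encoder), 1024 is
+        # OpenCLIP ViT-H/14 (SD 2.x), and 1280 is OpenCLIP ViT-bigG/14, which shows up in SDXL "clip_g"
+        # embeddings.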
+        match token_dim:
+            case 768:
+                return BaseModelType.StableDiffusion1
+            case 1024:
+                return BaseModelType.StableDiffusion2
+            case 1280:
+                return BaseModelType.StableDiffusionXL
+            case _:
+                raise NotAMatchError(f"unrecognized token dimension {token_dim}")
+
+
+class TI_File_Config_Base(TI_Config_Base):
+    """Model config for textual inversion embeddings in a single file."""
+
+    format: Literal[ModelFormat.EmbeddingFile] = Field(default=ModelFormat.EmbeddingFile)
+
+    @classmethod
+    def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self:
+        raise_if_not_file(mod)
+
+        raise_for_override_fields(cls, override_fields)
+
+        if not cls._file_looks_like_embedding(mod):
+            raise NotAMatchError("model does not look like a textual inversion embedding file")
+
+        cls._validate_base(mod)
+
+        return cls(**override_fields)
+
+
+class TI_File_SD1_Config(TI_File_Config_Base, Config_Base):
+    base: Literal[BaseModelType.StableDiffusion1] = Field(default=BaseModelType.StableDiffusion1)
+
+
+class TI_File_SD2_Config(TI_File_Config_Base, Config_Base):
+    base: Literal[BaseModelType.StableDiffusion2] = Field(default=BaseModelType.StableDiffusion2)
+
+
+class TI_File_SDXL_Config(TI_File_Config_Base, Config_Base):
+    base: Literal[BaseModelType.StableDiffusionXL] = Field(default=BaseModelType.StableDiffusionXL)
+
+
+class TI_Folder_Config_Base(TI_Config_Base):
+    """Model config for textual inversion embeddings in a folder."""
+
+    format: Literal[ModelFormat.EmbeddingFolder] = Field(default=ModelFormat.EmbeddingFolder)
+
+    @classmethod
+    def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self:
+        raise_if_not_dir(mod)
+
+        raise_for_override_fields(cls, override_fields)
+
+        for p in mod.weight_files():
+            if cls._file_looks_like_embedding(mod, p):
+                cls._validate_base(mod, p)
+                return cls(**override_fields)
+
+        raise NotAMatchError("model does not look like a textual inversion embedding folder")
+
+
+class TI_Folder_SD1_Config(TI_Folder_Config_Base, Config_Base):
+    base: Literal[BaseModelType.StableDiffusion1] = Field(default=BaseModelType.StableDiffusion1)
+
+
+class TI_Folder_SD2_Config(TI_Folder_Config_Base, Config_Base):
+    base: Literal[BaseModelType.StableDiffusion2] = Field(default=BaseModelType.StableDiffusion2)
+
+
+class TI_Folder_SDXL_Config(TI_Folder_Config_Base, Config_Base):
+    base: Literal[BaseModelType.StableDiffusionXL] = Field(default=BaseModelType.StableDiffusionXL)
diff --git a/invokeai/backend/model_manager/configs/unknown.py b/invokeai/backend/model_manager/configs/unknown.py
new file mode 100644
index 00000000000..2371cca089d
--- /dev/null
+++ b/invokeai/backend/model_manager/configs/unknown.py
@@ -0,0 +1,41 @@
+from typing import Any, Literal, Self
+
+from pydantic import Field
+
+from invokeai.app.services.config.config_default import get_config
+from invokeai.backend.model_manager.configs.base import Config_Base
+from invokeai.backend.model_manager.configs.identification_utils import NotAMatchError
+from invokeai.backend.model_manager.model_on_disk import ModelOnDisk
+from invokeai.backend.model_manager.taxonomy import (
+    BaseModelType,
+    ModelFormat,
+    ModelType,
+)
+
+app_config = get_config()
+
+
+class Unknown_Config(Config_Base):
+    """Model config for unknown models, used as a fallback when we cannot positively identify a model."""
+
+    base: Literal[BaseModelType.Unknown] = Field(default=BaseModelType.Unknown)
+    type: Literal[ModelType.Unknown] = Field(default=ModelType.Unknown)
+    format: Literal[ModelFormat.Unknown] = Field(default=ModelFormat.Unknown)
+
+    @classmethod
+    def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self:
+        """Create an Unknown_Config for models that couldn't be positively identified.
+
+        Note: Basic path validation (file extensions, directory structure) is already
+        performed by ModelConfigFactory before this method is called.
+        """
+        if not app_config.allow_unknown_models:
+            raise NotAMatchError("unknown models are not allowed by configuration")
+
+        return cls(
+            **override_fields,
+            # Override the type/format/base to ensure it's marked as unknown.
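+            # (Assumes `override_fields` does not already contain these keys - if it did, Python would
+            # raise a TypeError for duplicate keyword arguments on this call.)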
+            base=BaseModelType.Unknown,
+            type=ModelType.Unknown,
+            format=ModelFormat.Unknown,
+        )
diff --git a/invokeai/backend/model_manager/configs/vae.py b/invokeai/backend/model_manager/configs/vae.py
new file mode 100644
index 00000000000..6c51fd770ee
--- /dev/null
+++ b/invokeai/backend/model_manager/configs/vae.py
@@ -0,0 +1,166 @@
+import re
+from typing import (
+    Literal,
+    Self,
+)
+
+from pydantic import Field
+from typing_extensions import Any
+
+from invokeai.backend.model_manager.configs.base import Checkpoint_Config_Base, Config_Base, Diffusers_Config_Base
+from invokeai.backend.model_manager.configs.identification_utils import (
+    NotAMatchError,
+    common_config_paths,
+    get_config_dict_or_raise,
+    raise_for_class_name,
+    raise_for_override_fields,
+    raise_if_not_dir,
+    raise_if_not_file,
+    state_dict_has_any_keys_starting_with,
+)
+from invokeai.backend.model_manager.model_on_disk import ModelOnDisk
+from invokeai.backend.model_manager.taxonomy import (
+    BaseModelType,
+    ModelFormat,
+    ModelType,
+)
+
+REGEX_TO_BASE: dict[str, BaseModelType] = {
+    r"xl": BaseModelType.StableDiffusionXL,
+    r"sd2": BaseModelType.StableDiffusion2,
+    r"vae": BaseModelType.StableDiffusion1,
+    r"FLUX.1-schnell_ae": BaseModelType.Flux,
+}
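+# Note: matching against REGEX_TO_BASE is first-hit-wins in dict insertion order, so e.g.
+# "sdxl_vae.safetensors" hits r"xl" and is classified as SDXL before the catch-all r"vae" entry
+# (which would map any remaining "vae"-named file to SD 1.x).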
models (diffusers version).""" + + type: Literal[ModelType.VAE] = Field(default=ModelType.VAE) + format: Literal[ModelFormat.Diffusers] = Field(default=ModelFormat.Diffusers) + + @classmethod + def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self: + raise_if_not_dir(mod) + + raise_for_override_fields(cls, override_fields) + + raise_for_class_name( + common_config_paths(mod.path), + { + "AutoencoderKL", + "AutoencoderTiny", + }, + ) + + cls._validate_base(mod) + + return cls(**override_fields) + + @classmethod + def _validate_base(cls, mod: ModelOnDisk) -> None: + """Raise `NotAMatch` if the model base does not match this config class.""" + expected_base = cls.model_fields["base"].default + recognized_base = cls._get_base_or_raise(mod) + if expected_base is not recognized_base: + raise NotAMatchError(f"base is {recognized_base}, not {expected_base}") + + @classmethod + def _config_looks_like_sdxl(cls, config: dict[str, Any]) -> bool: + # Heuristic: These config values that distinguish Stability's SD 1.x VAE from their SDXL VAE. + return config.get("scaling_factor", 0) == 0.13025 and config.get("sample_size") in [512, 1024] + + @classmethod + def _name_looks_like_sdxl(cls, mod: ModelOnDisk) -> bool: + # Heuristic: SD and SDXL VAE are the same shape (3-channel RGB to 4-channel float scaled down + # by a factor of 8), so we can't necessarily tell them apart by config hyperparameters. Best + # we can do is guess based on name. + return bool(re.search(r"xl\b", cls._guess_name(mod), re.IGNORECASE)) + + @classmethod + def _guess_name(cls, mod: ModelOnDisk) -> str: + name = mod.path.name + if name == "vae": + name = mod.path.parent.name + return name + + @classmethod + def _get_base_or_raise(cls, mod: ModelOnDisk) -> BaseModelType: + config_dict = get_config_dict_or_raise(common_config_paths(mod.path)) + if cls._config_looks_like_sdxl(config_dict): + return BaseModelType.StableDiffusionXL + elif cls._name_looks_like_sdxl(mod): + return BaseModelType.StableDiffusionXL + else: + # TODO(psyche): Figure out how to positively identify SD1 here, and raise if we can't. Until then, YOLO. 
+ return BaseModelType.StableDiffusion1 + + +class VAE_Diffusers_SD1_Config(VAE_Diffusers_Config_Base, Config_Base): + base: Literal[BaseModelType.StableDiffusion1] = Field(default=BaseModelType.StableDiffusion1) + + +class VAE_Diffusers_SDXL_Config(VAE_Diffusers_Config_Base, Config_Base): + base: Literal[BaseModelType.StableDiffusionXL] = Field(default=BaseModelType.StableDiffusionXL) diff --git a/invokeai/backend/model_manager/legacy_probe.py b/invokeai/backend/model_manager/legacy_probe.py deleted file mode 100644 index 36fd82667d7..00000000000 --- a/invokeai/backend/model_manager/legacy_probe.py +++ /dev/null @@ -1,1169 +0,0 @@ -import json -import re -from pathlib import Path -from typing import Any, Callable, Dict, Literal, Optional, Union - -import picklescan.scanner as pscan -import safetensors.torch -import spandrel -import torch - -import invokeai.backend.util.logging as logger -from invokeai.app.services.config.config_default import get_config -from invokeai.app.util.misc import uuid_string -from invokeai.backend.flux.controlnet.state_dict_utils import ( - is_state_dict_instantx_controlnet, - is_state_dict_xlabs_controlnet, -) -from invokeai.backend.flux.flux_state_dict_utils import get_flux_in_channels_from_state_dict -from invokeai.backend.flux.ip_adapter.state_dict_utils import is_state_dict_xlabs_ip_adapter -from invokeai.backend.flux.redux.flux_redux_state_dict_utils import is_state_dict_likely_flux_redux -from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS, ModelHash -from invokeai.backend.model_manager.config import ( - AnyModelConfig, - ControlAdapterDefaultSettings, - InvalidModelConfigException, - LoraModelDefaultSettings, - MainModelDefaultSettings, - ModelConfigFactory, - SubmodelDefinition, -) -from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import ConfigLoader -from invokeai.backend.model_manager.model_on_disk import ModelOnDisk -from invokeai.backend.model_manager.taxonomy import ( - AnyVariant, - BaseModelType, - ModelFormat, - ModelRepoVariant, - ModelSourceType, - ModelType, - ModelVariantType, - SchedulerPredictionType, - SubModelType, -) -from invokeai.backend.model_manager.util.model_util import ( - get_clip_variant_type, - lora_token_vector_length, - read_checkpoint_meta, -) -from invokeai.backend.patches.lora_conversions.flux_control_lora_utils import is_state_dict_likely_flux_control -from invokeai.backend.patches.lora_conversions.flux_diffusers_lora_conversion_utils import ( - is_state_dict_likely_in_flux_diffusers_format, -) -from invokeai.backend.patches.lora_conversions.flux_kohya_lora_conversion_utils import ( - is_state_dict_likely_in_flux_kohya_format, -) -from invokeai.backend.patches.lora_conversions.flux_onetrainer_lora_conversion_utils import ( - is_state_dict_likely_in_flux_onetrainer_format, -) -from invokeai.backend.quantization.gguf.ggml_tensor import GGMLTensor -from invokeai.backend.quantization.gguf.loaders import gguf_sd_loader -from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel -from invokeai.backend.util.silence_warnings import SilenceWarnings - -CkptType = Dict[str | int, Any] - -LEGACY_CONFIGS: Dict[BaseModelType, Dict[ModelVariantType, Union[str, Dict[SchedulerPredictionType, str]]]] = { - BaseModelType.StableDiffusion1: { - ModelVariantType.Normal: { - SchedulerPredictionType.Epsilon: "v1-inference.yaml", - SchedulerPredictionType.VPrediction: "v1-inference-v.yaml", - }, - ModelVariantType.Inpaint: "v1-inpainting-inference.yaml", - }, - 
BaseModelType.StableDiffusion2: { - ModelVariantType.Normal: { - SchedulerPredictionType.Epsilon: "v2-inference.yaml", - SchedulerPredictionType.VPrediction: "v2-inference-v.yaml", - }, - ModelVariantType.Inpaint: { - SchedulerPredictionType.Epsilon: "v2-inpainting-inference.yaml", - SchedulerPredictionType.VPrediction: "v2-inpainting-inference-v.yaml", - }, - ModelVariantType.Depth: "v2-midas-inference.yaml", - }, - BaseModelType.StableDiffusionXL: { - ModelVariantType.Normal: "sd_xl_base.yaml", - ModelVariantType.Inpaint: "sd_xl_inpaint.yaml", - }, - BaseModelType.StableDiffusionXLRefiner: { - ModelVariantType.Normal: "sd_xl_refiner.yaml", - }, -} - - -class ProbeBase(object): - """Base class for probes.""" - - def __init__(self, model_path: Path): - self.model_path = model_path - - def get_base_type(self) -> BaseModelType: - """Get model base type.""" - raise NotImplementedError - - def get_format(self) -> ModelFormat: - """Get model file format.""" - raise NotImplementedError - - def get_variant_type(self) -> Optional[ModelVariantType]: - """Get model variant type.""" - return None - - def get_scheduler_prediction_type(self) -> Optional[SchedulerPredictionType]: - """Get model scheduler prediction type.""" - return None - - def get_image_encoder_model_id(self) -> Optional[str]: - """Get image encoder (IP adapters only).""" - return None - - -class ModelProbe(object): - PROBES: Dict[str, Dict[ModelType, type[ProbeBase]]] = { - "diffusers": {}, - "checkpoint": {}, - "onnx": {}, - } - - CLASS2TYPE = { - "FluxPipeline": ModelType.Main, - "StableDiffusionPipeline": ModelType.Main, - "StableDiffusionInpaintPipeline": ModelType.Main, - "StableDiffusionXLPipeline": ModelType.Main, - "StableDiffusionXLImg2ImgPipeline": ModelType.Main, - "StableDiffusionXLInpaintPipeline": ModelType.Main, - "StableDiffusion3Pipeline": ModelType.Main, - "LatentConsistencyModelPipeline": ModelType.Main, - "AutoencoderKL": ModelType.VAE, - "AutoencoderTiny": ModelType.VAE, - "ControlNetModel": ModelType.ControlNet, - "CLIPVisionModelWithProjection": ModelType.CLIPVision, - "T2IAdapter": ModelType.T2IAdapter, - "CLIPModel": ModelType.CLIPEmbed, - "CLIPTextModel": ModelType.CLIPEmbed, - "T5EncoderModel": ModelType.T5Encoder, - "FluxControlNetModel": ModelType.ControlNet, - "SD3Transformer2DModel": ModelType.Main, - "CLIPTextModelWithProjection": ModelType.CLIPEmbed, - "SiglipModel": ModelType.SigLIP, - "LlavaOnevisionForConditionalGeneration": ModelType.LlavaOnevision, - "CogView4Pipeline": ModelType.Main, - } - - TYPE2VARIANT: Dict[ModelType, Callable[[str], Optional[AnyVariant]]] = {ModelType.CLIPEmbed: get_clip_variant_type} - - @classmethod - def register_probe( - cls, format: Literal["diffusers", "checkpoint", "onnx"], model_type: ModelType, probe_class: type[ProbeBase] - ) -> None: - cls.PROBES[format][model_type] = probe_class - - @classmethod - def probe( - cls, model_path: Path, fields: Optional[Dict[str, Any]] = None, hash_algo: HASHING_ALGORITHMS = "blake3_single" - ) -> AnyModelConfig: - """ - Probe the model at model_path and return its configuration record. - - :param model_path: Path to the model file (checkpoint) or directory (diffusers). - :param fields: An optional dictionary that can be used to override probed - fields. Typically used for fields that don't probe well, such as prediction_type. - - Returns: The appropriate model configuration derived from ModelConfigBase. 
- """ - if fields is None: - fields = {} - - model_path = model_path.resolve() - - format_type = ModelFormat.Diffusers if model_path.is_dir() else ModelFormat.Checkpoint - model_info = None - model_type = ModelType(fields["type"]) if "type" in fields and fields["type"] else None - if not model_type: - if format_type is ModelFormat.Diffusers: - model_type = cls.get_model_type_from_folder(model_path) - else: - model_type = cls.get_model_type_from_checkpoint(model_path) - format_type = ModelFormat.ONNX if model_type == ModelType.ONNX else format_type - - probe_class = cls.PROBES[format_type].get(model_type) - if not probe_class: - raise InvalidModelConfigException(f"Unhandled combination of {format_type} and {model_type}") - - probe = probe_class(model_path) - - fields["source_type"] = fields.get("source_type") or ModelSourceType.Path - fields["source"] = fields.get("source") or model_path.as_posix() - fields["key"] = fields.get("key", uuid_string()) - fields["path"] = model_path.as_posix() - fields["type"] = fields.get("type") or model_type - fields["base"] = fields.get("base") or probe.get_base_type() - variant_func = cls.TYPE2VARIANT.get(fields["type"], None) - fields["variant"] = ( - fields.get("variant") or (variant_func and variant_func(model_path.as_posix())) or probe.get_variant_type() - ) - fields["prediction_type"] = fields.get("prediction_type") or probe.get_scheduler_prediction_type() - fields["image_encoder_model_id"] = fields.get("image_encoder_model_id") or probe.get_image_encoder_model_id() - fields["name"] = fields.get("name") or cls.get_model_name(model_path) - fields["description"] = ( - fields.get("description") or f"{fields['base'].value} {model_type.value} model {fields['name']}" - ) - fields["format"] = ModelFormat(fields.get("format")) if "format" in fields else probe.get_format() - fields["hash"] = fields.get("hash") or ModelHash(algorithm=hash_algo).hash(model_path) - fields["file_size"] = fields.get("file_size") or ModelOnDisk(model_path).size() - - fields["default_settings"] = fields.get("default_settings") - - if not fields["default_settings"]: - if fields["type"] in {ModelType.ControlNet, ModelType.T2IAdapter, ModelType.ControlLoRa}: - fields["default_settings"] = get_default_settings_control_adapters(fields["name"]) - if fields["type"] in {ModelType.LoRA}: - fields["default_settings"] = get_default_settings_lora() - elif fields["type"] is ModelType.Main: - fields["default_settings"] = get_default_settings_main(fields["base"]) - - if format_type == ModelFormat.Diffusers and isinstance(probe, FolderProbeBase): - fields["repo_variant"] = fields.get("repo_variant") or probe.get_repo_variant() - - # additional fields needed for main and controlnet models - if fields["type"] in [ModelType.Main, ModelType.ControlNet, ModelType.VAE] and fields["format"] in [ - ModelFormat.Checkpoint, - ModelFormat.BnbQuantizednf4b, - ModelFormat.GGUFQuantized, - ]: - ckpt_config_path = cls._get_checkpoint_config_path( - model_path, - model_type=fields["type"], - base_type=fields["base"], - variant_type=fields["variant"], - prediction_type=fields["prediction_type"], - ) - fields["config_path"] = str(ckpt_config_path) - - # additional fields needed for main non-checkpoint models - elif fields["type"] == ModelType.Main and fields["format"] in [ - ModelFormat.ONNX, - ModelFormat.Olive, - ModelFormat.Diffusers, - ]: - fields["upcast_attention"] = fields.get("upcast_attention") or ( - fields["base"] == BaseModelType.StableDiffusion2 - and fields["prediction_type"] == 
SchedulerPredictionType.VPrediction - ) - - get_submodels = getattr(probe, "get_submodels", None) - if fields["base"] == BaseModelType.StableDiffusion3 and callable(get_submodels): - fields["submodels"] = get_submodels() - - model_info = ModelConfigFactory.make_config(fields) # , key=fields.get("key", None)) - return model_info - - @classmethod - def get_model_name(cls, model_path: Path) -> str: - if model_path.suffix in {".safetensors", ".bin", ".pt", ".ckpt"}: - return model_path.stem - else: - return model_path.name - - @classmethod - def get_model_type_from_checkpoint(cls, model_path: Path, checkpoint: Optional[CkptType] = None) -> ModelType: - if model_path.suffix not in (".bin", ".pt", ".ckpt", ".safetensors", ".pth", ".gguf"): - raise InvalidModelConfigException(f"{model_path}: unrecognized suffix") - - if model_path.name == "learned_embeds.bin": - return ModelType.TextualInversion - - ckpt = checkpoint if checkpoint else read_checkpoint_meta(model_path, scan=True) - ckpt = ckpt.get("state_dict", ckpt) - - if isinstance(ckpt, dict) and is_state_dict_likely_flux_control(ckpt): - return ModelType.ControlLoRa - - if isinstance(ckpt, dict) and is_state_dict_likely_flux_redux(ckpt): - return ModelType.FluxRedux - - for key in [str(k) for k in ckpt.keys()]: - if key.startswith( - ( - "cond_stage_model.", - "first_stage_model.", - "model.diffusion_model.", - # Some FLUX checkpoint files contain transformer keys prefixed with "model.diffusion_model". - # This prefix is typically used to distinguish between multiple models bundled in a single file. - "model.diffusion_model.double_blocks.", - ) - ): - # Keys starting with double_blocks are associated with Flux models - return ModelType.Main - # FLUX models in the official BFL format contain keys with the "double_blocks." prefix, but we must be - # careful to avoid false positives on XLabs FLUX IP-Adapter models. - elif key.startswith("double_blocks.") and "ip_adapter" not in key: - return ModelType.Main - elif key.startswith(("encoder.conv_in", "decoder.conv_in")): - return ModelType.VAE - elif key.startswith(("lora_te_", "lora_unet_", "lora_te1_", "lora_te2_", "lora_transformer_")): - return ModelType.LoRA - # "lora_A.weight" and "lora_B.weight" are associated with models in PEFT format. We don't support all PEFT - # LoRA models, but as of the time of writing, we support Diffusers FLUX PEFT LoRA models. - elif key.endswith(("to_k_lora.up.weight", "to_q_lora.down.weight", "lora_A.weight", "lora_B.weight")): - return ModelType.LoRA - elif key.startswith( - ( - "controlnet", - "control_model", - "input_blocks", - # XLabs FLUX ControlNet models have keys starting with "controlnet_blocks." - # For example: https://huggingface.co/XLabs-AI/flux-controlnet-collections/blob/86ab1e915a389d5857135c00e0d350e9e38a9048/flux-canny-controlnet_v2.safetensors - # TODO(ryand): This is very fragile. XLabs FLUX ControlNet models also contain keys starting with - # "double_blocks.", which we check for above. But, I'm afraid to modify this logic because it is so - # delicate. - "controlnet_blocks", - ) - ): - return ModelType.ControlNet - elif key.startswith( - ( - "image_proj.", - "ip_adapter.", - # XLabs FLUX IP-Adapter models have keys startinh with "ip_adapter_proj_model.". 
- "ip_adapter_proj_model.", - ) - ): - return ModelType.IPAdapter - elif key in {"emb_params", "string_to_param"}: - return ModelType.TextualInversion - - # diffusers-ti - if len(ckpt) < 10 and all(isinstance(v, torch.Tensor) for v in ckpt.values()): - return ModelType.TextualInversion - - # Check if the model can be loaded as a SpandrelImageToImageModel. - # This check is intentionally performed last, as it can be expensive (it requires loading the model from disk). - try: - # It would be nice to avoid having to load the Spandrel model from disk here. A couple of options were - # explored to avoid this: - # 1. Call `SpandrelImageToImageModel.load_from_state_dict(ckpt)`, where `ckpt` is a state_dict on the meta - # device. Unfortunately, some Spandrel models perform operations during initialization that are not - # supported on meta tensors. - # 2. Spandrel has internal logic to determine a model's type from its state_dict before loading the model. - # This logic is not exposed in spandrel's public API. We could copy the logic here, but then we have to - # maintain it, and the risk of false positive detections is higher. - SpandrelImageToImageModel.load_from_file(model_path) - return ModelType.SpandrelImageToImage - except spandrel.UnsupportedModelError: - pass - except Exception as e: - logger.warning( - f"Encountered error while probing to determine if {model_path} is a Spandrel model. Ignoring. Error: {e}" - ) - - raise InvalidModelConfigException(f"Unable to determine model type for {model_path}") - - @classmethod - def get_model_type_from_folder(cls, folder_path: Path) -> ModelType: - """Get the model type of a hugging-face style folder.""" - class_name = None - error_hint = None - for suffix in ["bin", "safetensors"]: - if (folder_path / f"learned_embeds.{suffix}").exists(): - return ModelType.TextualInversion - if (folder_path / f"pytorch_lora_weights.{suffix}").exists(): - return ModelType.LoRA - if (folder_path / "unet/model.onnx").exists(): - return ModelType.ONNX - if (folder_path / "image_encoder.txt").exists(): - return ModelType.IPAdapter - - config_path = None - for p in [ - folder_path / "model_index.json", # pipeline - folder_path / "config.json", # most diffusers - folder_path / "text_encoder_2" / "config.json", # T5 text encoder - folder_path / "text_encoder" / "config.json", # T5 CLIP - ]: - if p.exists(): - config_path = p - break - - if config_path: - with open(config_path, "r") as file: - conf = json.load(file) - if "_class_name" in conf: - class_name = conf["_class_name"] - elif "architectures" in conf: - class_name = conf["architectures"][0] - else: - class_name = None - else: - error_hint = f"No model_index.json or config.json found in {folder_path}." 
- - if class_name and (type := cls.CLASS2TYPE.get(class_name)): - return type - else: - error_hint = f"class {class_name} is not one of the supported classes [{', '.join(cls.CLASS2TYPE.keys())}]" - - # give up - raise InvalidModelConfigException( - f"Unable to determine model type for {folder_path}" + (f"; {error_hint}" if error_hint else "") - ) - - @classmethod - def _get_checkpoint_config_path( - cls, - model_path: Path, - model_type: ModelType, - base_type: BaseModelType, - variant_type: ModelVariantType, - prediction_type: SchedulerPredictionType, - ) -> Path: - # look for a YAML file adjacent to the model file first - possible_conf = model_path.with_suffix(".yaml") - if possible_conf.exists(): - return possible_conf.absolute() - - if model_type is ModelType.Main: - if base_type == BaseModelType.Flux: - # TODO: Decide between dev/schnell - checkpoint = ModelProbe._scan_and_load_checkpoint(model_path) - state_dict = checkpoint.get("state_dict") or checkpoint - - # HACK: For FLUX, config_file is used as a key into invokeai.backend.flux.util.params during model - # loading. When FLUX support was first added, it was decided that this was the easiest way to support - # the various FLUX formats rather than adding new model types/formats. Be careful when modifying this in - # the future. - if ( - "guidance_in.out_layer.weight" in state_dict - or "model.diffusion_model.guidance_in.out_layer.weight" in state_dict - ): - if variant_type == ModelVariantType.Normal: - config_file = "flux-dev" - elif variant_type == ModelVariantType.Inpaint: - config_file = "flux-dev-fill" - else: - raise ValueError(f"Unexpected FLUX variant type: {variant_type}") - else: - config_file = "flux-schnell" - else: - config_file = LEGACY_CONFIGS[base_type][variant_type] - if isinstance(config_file, dict): # need another tier for sd-2.x models - config_file = config_file[prediction_type] - config_file = f"stable-diffusion/{config_file}" - elif model_type is ModelType.ControlNet: - config_file = ( - "controlnet/cldm_v15.yaml" - if base_type is BaseModelType.StableDiffusion1 - else "controlnet/cldm_v21.yaml" - ) - elif model_type is ModelType.VAE: - config_file = ( - # For flux, this is a key in invokeai.backend.flux.util.ae_params - # Due to model type and format being the descriminator for model configs this - # is used rather than attempting to support flux with separate model types and format - # If changed in the future, please fix me - "flux" - if base_type is BaseModelType.Flux - else "stable-diffusion/v1-inference.yaml" - if base_type is BaseModelType.StableDiffusion1 - else "stable-diffusion/sd_xl_base.yaml" - if base_type is BaseModelType.StableDiffusionXL - else "stable-diffusion/v2-inference.yaml" - ) - else: - raise InvalidModelConfigException( - f"{model_path}: Unrecognized combination of model_type={model_type}, base_type={base_type}" - ) - return Path(config_file) - - @classmethod - def _scan_and_load_checkpoint(cls, model_path: Path) -> CkptType: - with SilenceWarnings(): - if model_path.suffix.endswith((".ckpt", ".pt", ".pth", ".bin")): - cls._scan_model(model_path.name, model_path) - model = torch.load(model_path, map_location="cpu") - assert isinstance(model, dict) - return model - elif model_path.suffix.endswith(".gguf"): - return gguf_sd_loader(model_path, compute_dtype=torch.float32) - else: - return safetensors.torch.load_file(model_path) - - @classmethod - def _scan_model(cls, model_name: str, checkpoint: Path) -> None: - """ - Apply picklescanner to the indicated checkpoint and issue a warning - 
and option to exit if an infected file is identified. - """ - # scan model - scan_result = pscan.scan_file_path(checkpoint) - if scan_result.infected_files != 0: - if get_config().unsafe_disable_picklescan: - logger.warning( - f"The model {model_name} is potentially infected by malware, but picklescan is disabled. " - "Proceeding with caution." - ) - else: - raise RuntimeError(f"The model {model_name} is potentially infected by malware. Aborting import.") - if scan_result.scan_err: - if get_config().unsafe_disable_picklescan: - logger.warning( - f"Error scanning the model at {model_name} for malware, but picklescan is disabled. " - "Proceeding with caution." - ) - else: - raise RuntimeError(f"Error scanning the model at {model_name} for malware. Aborting import.") - - -# Probing utilities -MODEL_NAME_TO_PREPROCESSOR = { - "canny": "canny_image_processor", - "mlsd": "mlsd_image_processor", - "depth": "depth_anything_image_processor", - "bae": "normalbae_image_processor", - "normal": "normalbae_image_processor", - "sketch": "pidi_image_processor", - "scribble": "lineart_image_processor", - "lineart anime": "lineart_anime_image_processor", - "lineart_anime": "lineart_anime_image_processor", - "lineart": "lineart_image_processor", - "soft": "hed_image_processor", - "softedge": "hed_image_processor", - "hed": "hed_image_processor", - "shuffle": "content_shuffle_image_processor", - "pose": "dw_openpose_image_processor", - "mediapipe": "mediapipe_face_processor", - "pidi": "pidi_image_processor", - "zoe": "zoe_depth_image_processor", - "color": "color_map_image_processor", -} - - -def get_default_settings_control_adapters(model_name: str) -> Optional[ControlAdapterDefaultSettings]: - for k, v in MODEL_NAME_TO_PREPROCESSOR.items(): - model_name_lower = model_name.lower() - if k in model_name_lower: - return ControlAdapterDefaultSettings(preprocessor=v) - return None - - -def get_default_settings_lora() -> LoraModelDefaultSettings: - return LoraModelDefaultSettings() - - -def get_default_settings_main(model_base: BaseModelType) -> Optional[MainModelDefaultSettings]: - if model_base is BaseModelType.StableDiffusion1 or model_base is BaseModelType.StableDiffusion2: - return MainModelDefaultSettings(width=512, height=512) - elif model_base is BaseModelType.StableDiffusionXL: - return MainModelDefaultSettings(width=1024, height=1024) - # We don't provide defaults for BaseModelType.StableDiffusionXLRefiner, as they are not standalone models. 
- return None - - -# ##################################################3 -# Checkpoint probing -# ##################################################3 - - -class CheckpointProbeBase(ProbeBase): - def __init__(self, model_path: Path): - super().__init__(model_path) - self.checkpoint = ModelProbe._scan_and_load_checkpoint(model_path) - - def get_format(self) -> ModelFormat: - state_dict = self.checkpoint.get("state_dict") or self.checkpoint - if ( - "double_blocks.0.img_attn.proj.weight.quant_state.bitsandbytes__nf4" in state_dict - or "model.diffusion_model.double_blocks.0.img_attn.proj.weight.quant_state.bitsandbytes__nf4" in state_dict - ): - return ModelFormat.BnbQuantizednf4b - elif any(isinstance(v, GGMLTensor) for v in state_dict.values()): - return ModelFormat.GGUFQuantized - return ModelFormat("checkpoint") - - def get_variant_type(self) -> ModelVariantType: - model_type = ModelProbe.get_model_type_from_checkpoint(self.model_path, self.checkpoint) - base_type = self.get_base_type() - if model_type != ModelType.Main: - return ModelVariantType.Normal - state_dict = self.checkpoint.get("state_dict") or self.checkpoint - - if base_type == BaseModelType.Flux: - in_channels = get_flux_in_channels_from_state_dict(state_dict) - - if in_channels is None: - # If we cannot find the in_channels, we assume that this is a normal variant. Log a warning. - logger.warning( - f"{self.model_path} does not have img_in.weight or model.diffusion_model.img_in.weight key. Assuming normal variant." - ) - return ModelVariantType.Normal - - # FLUX Model variant types are distinguished by input channels: - # - Unquantized Dev and Schnell have in_channels=64 - # - BNB-NF4 Dev and Schnell have in_channels=1 - # - FLUX Fill has in_channels=384 - # - Unsure of quantized FLUX Fill models - # - Unsure of GGUF-quantized models - if in_channels == 384: - # This is a FLUX Fill model. FLUX Fill needs special handling throughout the application. The variant - # type is used to determine whether to use the fill model or the base model. - return ModelVariantType.Inpaint - else: - # Fall back on "normal" variant type for all other FLUX models. 
- return ModelVariantType.Normal - - in_channels = state_dict["model.diffusion_model.input_blocks.0.0.weight"].shape[1] - if in_channels == 9: - return ModelVariantType.Inpaint - elif in_channels == 5: - return ModelVariantType.Depth - elif in_channels == 4: - return ModelVariantType.Normal - else: - raise InvalidModelConfigException( - f"Cannot determine variant type (in_channels={in_channels}) at {self.model_path}" - ) - - -class PipelineCheckpointProbe(CheckpointProbeBase): - def get_base_type(self) -> BaseModelType: - checkpoint = self.checkpoint - state_dict = self.checkpoint.get("state_dict") or checkpoint - if ( - "double_blocks.0.img_attn.norm.key_norm.scale" in state_dict - or "model.diffusion_model.double_blocks.0.img_attn.norm.key_norm.scale" in state_dict - ): - return BaseModelType.Flux - key_name = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight" - if key_name in state_dict and state_dict[key_name].shape[-1] == 768: - return BaseModelType.StableDiffusion1 - if key_name in state_dict and state_dict[key_name].shape[-1] == 1024: - return BaseModelType.StableDiffusion2 - key_name = "model.diffusion_model.input_blocks.4.1.transformer_blocks.0.attn2.to_k.weight" - if key_name in state_dict and state_dict[key_name].shape[-1] == 2048: - return BaseModelType.StableDiffusionXL - elif key_name in state_dict and state_dict[key_name].shape[-1] == 1280: - return BaseModelType.StableDiffusionXLRefiner - else: - raise InvalidModelConfigException("Cannot determine base type") - - def get_scheduler_prediction_type(self) -> SchedulerPredictionType: - """Return model prediction type.""" - type = self.get_base_type() - if type == BaseModelType.StableDiffusion2: - checkpoint = self.checkpoint - state_dict = self.checkpoint.get("state_dict") or checkpoint - key_name = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight" - if key_name in state_dict and state_dict[key_name].shape[-1] == 1024: - if "global_step" in checkpoint: - if checkpoint["global_step"] == 220000: - return SchedulerPredictionType.Epsilon - elif checkpoint["global_step"] == 110000: - return SchedulerPredictionType.VPrediction - return SchedulerPredictionType.VPrediction # a guess for sd2 ckpts - - elif type == BaseModelType.StableDiffusion1: - return SchedulerPredictionType.Epsilon # a reasonable guess for sd1 ckpts - else: - return SchedulerPredictionType.Epsilon - - -class VaeCheckpointProbe(CheckpointProbeBase): - def get_base_type(self) -> BaseModelType: - # VAEs of all base types have the same structure, so we wimp out and - # guess using the name. - for regexp, basetype in [ - (r"xl", BaseModelType.StableDiffusionXL), - (r"sd2", BaseModelType.StableDiffusion2), - (r"vae", BaseModelType.StableDiffusion1), - (r"FLUX.1-schnell_ae", BaseModelType.Flux), - ]: - if re.search(regexp, self.model_path.name, re.IGNORECASE): - return basetype - raise InvalidModelConfigException("Cannot determine base type") - - -class LoRACheckpointProbe(CheckpointProbeBase): - """Class for LoRA checkpoints.""" - - def get_format(self) -> ModelFormat: - if is_state_dict_likely_in_flux_diffusers_format(self.checkpoint): - # TODO(ryand): This is an unusual case. In other places throughout the codebase, we treat - # ModelFormat.Diffusers as meaning that the model is in a directory. In this case, the model is a single - # file, but the weight keys are in the diffusers format. 
- return ModelFormat.Diffusers - return ModelFormat.LyCORIS - - def get_base_type(self) -> BaseModelType: - if ( - is_state_dict_likely_in_flux_kohya_format(self.checkpoint) - or is_state_dict_likely_in_flux_onetrainer_format(self.checkpoint) - or is_state_dict_likely_in_flux_diffusers_format(self.checkpoint) - or is_state_dict_likely_flux_control(self.checkpoint) - ): - return BaseModelType.Flux - - # If we've gotten here, we assume that the model is a Stable Diffusion model. - token_vector_length = lora_token_vector_length(self.checkpoint) - if token_vector_length == 768: - return BaseModelType.StableDiffusion1 - elif token_vector_length == 1024: - return BaseModelType.StableDiffusion2 - elif token_vector_length == 1280: - return BaseModelType.StableDiffusionXL # recognizes format at https://civitai.com/models/224641 - elif token_vector_length == 2048: - return BaseModelType.StableDiffusionXL - else: - raise InvalidModelConfigException(f"Unknown LoRA type: {self.model_path}") - - -class TextualInversionCheckpointProbe(CheckpointProbeBase): - """Class for probing embeddings.""" - - def get_format(self) -> ModelFormat: - return ModelFormat.EmbeddingFile - - def get_base_type(self) -> BaseModelType: - checkpoint = self.checkpoint - if "string_to_token" in checkpoint: - token_dim = list(checkpoint["string_to_param"].values())[0].shape[-1] - elif "emb_params" in checkpoint: - token_dim = checkpoint["emb_params"].shape[-1] - elif "clip_g" in checkpoint: - token_dim = checkpoint["clip_g"].shape[-1] - else: - token_dim = list(checkpoint.values())[0].shape[0] - if token_dim == 768: - return BaseModelType.StableDiffusion1 - elif token_dim == 1024: - return BaseModelType.StableDiffusion2 - elif token_dim == 1280: - return BaseModelType.StableDiffusionXL - else: - raise InvalidModelConfigException(f"{self.model_path}: Could not determine base type") - - -class ControlNetCheckpointProbe(CheckpointProbeBase): - """Class for probing controlnets.""" - - def get_base_type(self) -> BaseModelType: - checkpoint = self.checkpoint - if is_state_dict_xlabs_controlnet(checkpoint) or is_state_dict_instantx_controlnet(checkpoint): - # TODO(ryand): Should I distinguish between XLabs, InstantX and other ControlNet models by implementing - # get_format()? 
- return BaseModelType.Flux - - for key_name in ( - "control_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight", - "controlnet_mid_block.bias", - "input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight", - "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k.weight", - ): - if key_name not in checkpoint: - continue - width = checkpoint[key_name].shape[-1] - if width == 768: - return BaseModelType.StableDiffusion1 - elif width == 1024: - return BaseModelType.StableDiffusion2 - elif width == 2048: - return BaseModelType.StableDiffusionXL - elif width == 1280: - return BaseModelType.StableDiffusionXL - raise InvalidModelConfigException(f"{self.model_path}: Unable to determine base type") - - -class IPAdapterCheckpointProbe(CheckpointProbeBase): - """Class for probing IP Adapters""" - - def get_base_type(self) -> BaseModelType: - checkpoint = self.checkpoint - - if is_state_dict_xlabs_ip_adapter(checkpoint): - return BaseModelType.Flux - - for key in checkpoint.keys(): - if not key.startswith(("image_proj.", "ip_adapter.")): - continue - cross_attention_dim = checkpoint["ip_adapter.1.to_k_ip.weight"].shape[-1] - if cross_attention_dim == 768: - return BaseModelType.StableDiffusion1 - elif cross_attention_dim == 1024: - return BaseModelType.StableDiffusion2 - elif cross_attention_dim == 2048: - return BaseModelType.StableDiffusionXL - else: - raise InvalidModelConfigException( - f"IP-Adapter had unexpected cross-attention dimension: {cross_attention_dim}." - ) - raise InvalidModelConfigException(f"{self.model_path}: Unable to determine base type") - - -class CLIPVisionCheckpointProbe(CheckpointProbeBase): - def get_base_type(self) -> BaseModelType: - raise NotImplementedError() - - -class T2IAdapterCheckpointProbe(CheckpointProbeBase): - def get_base_type(self) -> BaseModelType: - raise NotImplementedError() - - -class SpandrelImageToImageCheckpointProbe(CheckpointProbeBase): - def get_base_type(self) -> BaseModelType: - return BaseModelType.Any - - -class SigLIPCheckpointProbe(CheckpointProbeBase): - def get_base_type(self) -> BaseModelType: - raise NotImplementedError() - - -class FluxReduxCheckpointProbe(CheckpointProbeBase): - def get_base_type(self) -> BaseModelType: - return BaseModelType.Flux - - -class LlavaOnevisionCheckpointProbe(CheckpointProbeBase): - def get_base_type(self) -> BaseModelType: - raise NotImplementedError() - - -######################################################## -# classes for probing folders -####################################################### -class FolderProbeBase(ProbeBase): - def get_variant_type(self) -> ModelVariantType: - return ModelVariantType.Normal - - def get_format(self) -> ModelFormat: - return ModelFormat("diffusers") - - def get_repo_variant(self) -> ModelRepoVariant: - # get all files ending in .bin or .safetensors - weight_files = list(self.model_path.glob("**/*.safetensors")) - weight_files.extend(list(self.model_path.glob("**/*.bin"))) - for x in weight_files: - if ".fp16" in x.suffixes: - return ModelRepoVariant.FP16 - if "openvino_model" in x.name: - return ModelRepoVariant.OpenVINO - if "flax_model" in x.name: - return ModelRepoVariant.Flax - if x.suffix == ".onnx": - return ModelRepoVariant.ONNX - return ModelRepoVariant.Default - - -class PipelineFolderProbe(FolderProbeBase): - def get_base_type(self) -> BaseModelType: - # Handle pipelines with a UNet (i.e SD 1.x, SD2, SDXL). 
-        config_path = self.model_path / "unet" / "config.json"
-        if config_path.exists():
-            with open(config_path) as file:
-                unet_conf = json.load(file)
-            if unet_conf["cross_attention_dim"] == 768:
-                return BaseModelType.StableDiffusion1
-            elif unet_conf["cross_attention_dim"] == 1024:
-                return BaseModelType.StableDiffusion2
-            elif unet_conf["cross_attention_dim"] == 1280:
-                return BaseModelType.StableDiffusionXLRefiner
-            elif unet_conf["cross_attention_dim"] == 2048:
-                return BaseModelType.StableDiffusionXL
-            else:
-                raise InvalidModelConfigException(f"Unknown base model for {self.model_path}")
-
-        # Handle pipelines with a transformer (i.e. SD3).
-        config_path = self.model_path / "transformer" / "config.json"
-        if config_path.exists():
-            with open(config_path) as file:
-                transformer_conf = json.load(file)
-            if transformer_conf["_class_name"] == "SD3Transformer2DModel":
-                return BaseModelType.StableDiffusion3
-            elif transformer_conf["_class_name"] == "CogView4Transformer2DModel":
-                return BaseModelType.CogView4
-            else:
-                raise InvalidModelConfigException(f"Unknown base model for {self.model_path}")
-
-        raise InvalidModelConfigException(f"Unknown base model for {self.model_path}")
-
-    def get_scheduler_prediction_type(self) -> SchedulerPredictionType:
-        with open(self.model_path / "scheduler" / "scheduler_config.json", "r") as file:
-            scheduler_conf = json.load(file)
-        if scheduler_conf.get("prediction_type", "epsilon") == "v_prediction":
-            return SchedulerPredictionType.VPrediction
-        elif scheduler_conf.get("prediction_type", "epsilon") == "epsilon":
-            return SchedulerPredictionType.Epsilon
-        else:
-            raise InvalidModelConfigException(f"Unknown scheduler prediction type: {scheduler_conf['prediction_type']}")
-
-    def get_submodels(self) -> Dict[SubModelType, SubmodelDefinition]:
-        config = ConfigLoader.load_config(self.model_path, config_name="model_index.json")
-        submodels: Dict[SubModelType, SubmodelDefinition] = {}
-        for key, value in config.items():
-            if key.startswith("_") or not (isinstance(value, list) and len(value) == 2):
-                continue
-            model_loader = str(value[1])
-            if model_type := ModelProbe.CLASS2TYPE.get(model_loader):
-                variant_func = ModelProbe.TYPE2VARIANT.get(model_type, None)
-                submodels[SubModelType(key)] = SubmodelDefinition(
-                    path_or_prefix=(self.model_path / key).resolve().as_posix(),
-                    model_type=model_type,
-                    variant=variant_func and variant_func((self.model_path / key).as_posix()),
-                )
-
-        return submodels
-
-    def get_variant_type(self) -> ModelVariantType:
-        # This only works for pipelines! Any kind of
-        # exception results in our returning the
-        # "normal" variant type
-        try:
-            config_file = self.model_path / "unet" / "config.json"
-            with open(config_file, "r") as file:
-                conf = json.load(file)
-
-            in_channels = conf["in_channels"]
-            if in_channels == 9:
-                return ModelVariantType.Inpaint
-            elif in_channels == 5:
-                return ModelVariantType.Depth
-            elif in_channels == 4:
-                return ModelVariantType.Normal
-        except Exception:
-            pass
-        return ModelVariantType.Normal
-
-
-class VaeFolderProbe(FolderProbeBase):
-    def get_base_type(self) -> BaseModelType:
-        if self._config_looks_like_sdxl():
-            return BaseModelType.StableDiffusionXL
-        elif self._name_looks_like_sdxl():
-            # but SD and SDXL VAE are the same shape (3-channel RGB to 4-channel float scaled down
-            # by a factor of 8), we can't necessarily tell them apart by config hyperparameters.
- return BaseModelType.StableDiffusionXL - else: - return BaseModelType.StableDiffusion1 - - def _config_looks_like_sdxl(self) -> bool: - # config values that distinguish Stability's SD 1.x VAE from their SDXL VAE. - config_file = self.model_path / "config.json" - if not config_file.exists(): - raise InvalidModelConfigException(f"Cannot determine base type for {self.model_path}") - with open(config_file, "r") as file: - config = json.load(file) - return config.get("scaling_factor", 0) == 0.13025 and config.get("sample_size") in [512, 1024] - - def _name_looks_like_sdxl(self) -> bool: - return bool(re.search(r"xl\b", self._guess_name(), re.IGNORECASE)) - - def _guess_name(self) -> str: - name = self.model_path.name - if name == "vae": - name = self.model_path.parent.name - return name - - -class TextualInversionFolderProbe(FolderProbeBase): - def get_format(self) -> ModelFormat: - return ModelFormat.EmbeddingFolder - - def get_base_type(self) -> BaseModelType: - path = self.model_path / "learned_embeds.bin" - if not path.exists(): - raise InvalidModelConfigException( - f"{self.model_path.as_posix()} does not contain expected 'learned_embeds.bin' file" - ) - return TextualInversionCheckpointProbe(path).get_base_type() - - -class T5EncoderFolderProbe(FolderProbeBase): - def get_base_type(self) -> BaseModelType: - return BaseModelType.Any - - def get_format(self) -> ModelFormat: - path = self.model_path / "text_encoder_2" - if (path / "model.safetensors.index.json").exists(): - return ModelFormat.T5Encoder - files = list(path.glob("*.safetensors")) - if len(files) == 0: - raise InvalidModelConfigException(f"{self.model_path.as_posix()}: no .safetensors files found") - - # shortcut: look for the quantization in the name - if any(x for x in files if "llm_int8" in x.as_posix()): - return ModelFormat.BnbQuantizedLlmInt8b - - # more reliable path: probe contents for a 'SCB' key - ckpt = read_checkpoint_meta(files[0], scan=True) - if any("SCB" in x for x in ckpt.keys()): - return ModelFormat.BnbQuantizedLlmInt8b - - raise InvalidModelConfigException(f"{self.model_path.as_posix()}: unknown model format") - - -class ONNXFolderProbe(PipelineFolderProbe): - def get_base_type(self) -> BaseModelType: - # Due to the way the installer is set up, the configuration file for safetensors - # will come along for the ride if both the onnx and safetensors forms - # share the same directory. We take advantage of this here. - if (self.model_path / "unet" / "config.json").exists(): - return super().get_base_type() - else: - logger.warning('Base type probing is not implemented for ONNX models. 
Assuming "sd-1"') - return BaseModelType.StableDiffusion1 - - def get_format(self) -> ModelFormat: - return ModelFormat("onnx") - - def get_variant_type(self) -> ModelVariantType: - return ModelVariantType.Normal - - -class ControlNetFolderProbe(FolderProbeBase): - def get_base_type(self) -> BaseModelType: - config_file = self.model_path / "config.json" - if not config_file.exists(): - raise InvalidModelConfigException(f"Cannot determine base type for {self.model_path}") - with open(config_file, "r") as file: - config = json.load(file) - - if config.get("_class_name", None) == "FluxControlNetModel": - return BaseModelType.Flux - - # no obvious way to distinguish between sd2-base and sd2-768 - dimension = config["cross_attention_dim"] - if dimension == 768: - return BaseModelType.StableDiffusion1 - if dimension == 1024: - return BaseModelType.StableDiffusion2 - if dimension == 2048: - return BaseModelType.StableDiffusionXL - raise InvalidModelConfigException(f"Unable to determine model base for {self.model_path}") - - -class LoRAFolderProbe(FolderProbeBase): - def get_base_type(self) -> BaseModelType: - model_file = None - for suffix in ["safetensors", "bin"]: - base_file = self.model_path / f"pytorch_lora_weights.{suffix}" - if base_file.exists(): - model_file = base_file - break - if not model_file: - raise InvalidModelConfigException("Unknown LoRA format encountered") - return LoRACheckpointProbe(model_file).get_base_type() - - -class IPAdapterFolderProbe(FolderProbeBase): - def get_format(self) -> ModelFormat: - return ModelFormat.InvokeAI - - def get_base_type(self) -> BaseModelType: - model_file = self.model_path / "ip_adapter.bin" - if not model_file.exists(): - raise InvalidModelConfigException("Unknown IP-Adapter model format.") - - state_dict = torch.load(model_file, map_location="cpu") - cross_attention_dim = state_dict["ip_adapter"]["1.to_k_ip.weight"].shape[-1] - if cross_attention_dim == 768: - return BaseModelType.StableDiffusion1 - elif cross_attention_dim == 1024: - return BaseModelType.StableDiffusion2 - elif cross_attention_dim == 2048: - return BaseModelType.StableDiffusionXL - else: - raise InvalidModelConfigException( - f"IP-Adapter had unexpected cross-attention dimension: {cross_attention_dim}." 
-        )
-
-    def get_image_encoder_model_id(self) -> Optional[str]:
-        encoder_id_path = self.model_path / "image_encoder.txt"
-        if not encoder_id_path.exists():
-            return None
-        with open(encoder_id_path, "r") as f:
-            image_encoder_model = f.readline().strip()
-        return image_encoder_model
-
-
-class CLIPVisionFolderProbe(FolderProbeBase):
-    def get_base_type(self) -> BaseModelType:
-        return BaseModelType.Any
-
-
-class CLIPEmbedFolderProbe(FolderProbeBase):
-    def get_base_type(self) -> BaseModelType:
-        return BaseModelType.Any
-
-
-class SpandrelImageToImageFolderProbe(FolderProbeBase):
-    def get_base_type(self) -> BaseModelType:
-        raise NotImplementedError()
-
-
-class SigLIPFolderProbe(FolderProbeBase):
-    def get_base_type(self) -> BaseModelType:
-        return BaseModelType.Any
-
-
-class FluxReduxFolderProbe(FolderProbeBase):
-    def get_base_type(self) -> BaseModelType:
-        raise NotImplementedError()
-
-
-class LlaveOnevisionFolderProbe(FolderProbeBase):
-    def get_base_type(self) -> BaseModelType:
-        return BaseModelType.Any
-
-
-class T2IAdapterFolderProbe(FolderProbeBase):
-    def get_base_type(self) -> BaseModelType:
-        config_file = self.model_path / "config.json"
-        if not config_file.exists():
-            raise InvalidModelConfigException(f"Cannot determine base type for {self.model_path}")
-        with open(config_file, "r") as file:
-            config = json.load(file)
-
-        adapter_type = config.get("adapter_type", None)
-        if adapter_type == "full_adapter_xl":
-            return BaseModelType.StableDiffusionXL
-        elif adapter_type in ("full_adapter", "light_adapter"):
-            # I haven't seen any T2I adapter models for SD2, so assume that this is an SD1 adapter.
-            return BaseModelType.StableDiffusion1
-        else:
-            raise InvalidModelConfigException(
-                f"Unable to determine base model for '{self.model_path}' (adapter_type = {adapter_type})."
- ) - - -# Register probe classes -ModelProbe.register_probe("diffusers", ModelType.Main, PipelineFolderProbe) -ModelProbe.register_probe("diffusers", ModelType.VAE, VaeFolderProbe) -ModelProbe.register_probe("diffusers", ModelType.LoRA, LoRAFolderProbe) -ModelProbe.register_probe("diffusers", ModelType.ControlLoRa, LoRAFolderProbe) -ModelProbe.register_probe("diffusers", ModelType.TextualInversion, TextualInversionFolderProbe) -ModelProbe.register_probe("diffusers", ModelType.T5Encoder, T5EncoderFolderProbe) -ModelProbe.register_probe("diffusers", ModelType.ControlNet, ControlNetFolderProbe) -ModelProbe.register_probe("diffusers", ModelType.IPAdapter, IPAdapterFolderProbe) -ModelProbe.register_probe("diffusers", ModelType.CLIPEmbed, CLIPEmbedFolderProbe) -ModelProbe.register_probe("diffusers", ModelType.CLIPVision, CLIPVisionFolderProbe) -ModelProbe.register_probe("diffusers", ModelType.T2IAdapter, T2IAdapterFolderProbe) -ModelProbe.register_probe("diffusers", ModelType.SpandrelImageToImage, SpandrelImageToImageFolderProbe) -ModelProbe.register_probe("diffusers", ModelType.SigLIP, SigLIPFolderProbe) -ModelProbe.register_probe("diffusers", ModelType.FluxRedux, FluxReduxFolderProbe) -ModelProbe.register_probe("diffusers", ModelType.LlavaOnevision, LlaveOnevisionFolderProbe) - -ModelProbe.register_probe("checkpoint", ModelType.Main, PipelineCheckpointProbe) -ModelProbe.register_probe("checkpoint", ModelType.VAE, VaeCheckpointProbe) -ModelProbe.register_probe("checkpoint", ModelType.LoRA, LoRACheckpointProbe) -ModelProbe.register_probe("checkpoint", ModelType.ControlLoRa, LoRACheckpointProbe) -ModelProbe.register_probe("checkpoint", ModelType.TextualInversion, TextualInversionCheckpointProbe) -ModelProbe.register_probe("checkpoint", ModelType.ControlNet, ControlNetCheckpointProbe) -ModelProbe.register_probe("checkpoint", ModelType.IPAdapter, IPAdapterCheckpointProbe) -ModelProbe.register_probe("checkpoint", ModelType.CLIPVision, CLIPVisionCheckpointProbe) -ModelProbe.register_probe("checkpoint", ModelType.T2IAdapter, T2IAdapterCheckpointProbe) -ModelProbe.register_probe("checkpoint", ModelType.SpandrelImageToImage, SpandrelImageToImageCheckpointProbe) -ModelProbe.register_probe("checkpoint", ModelType.SigLIP, SigLIPCheckpointProbe) -ModelProbe.register_probe("checkpoint", ModelType.FluxRedux, FluxReduxCheckpointProbe) -ModelProbe.register_probe("checkpoint", ModelType.LlavaOnevision, LlavaOnevisionCheckpointProbe) - -ModelProbe.register_probe("onnx", ModelType.ONNX, ONNXFolderProbe) diff --git a/invokeai/backend/model_manager/load/load_base.py b/invokeai/backend/model_manager/load/load_base.py index 458fc0cfc0c..a4004afba75 100644 --- a/invokeai/backend/model_manager/load/load_base.py +++ b/invokeai/backend/model_manager/load/load_base.py @@ -12,9 +12,7 @@ import torch from invokeai.app.services.config import InvokeAIAppConfig -from invokeai.backend.model_manager.config import ( - AnyModelConfig, -) +from invokeai.backend.model_manager.configs.factory import AnyModelConfig from invokeai.backend.model_manager.load.model_cache.cache_record import CacheRecord from invokeai.backend.model_manager.load.model_cache.model_cache import ModelCache from invokeai.backend.model_manager.taxonomy import AnyModel, SubModelType @@ -91,14 +89,6 @@ def __init__(self, config: Optional[AnyModelConfig], cache_record: CacheRecord, self.config = config -# TODO(MM2): -# Some "intermediary" subclasses in the ModelLoaderBase class hierarchy define methods that their subclasses don't -# know about. 
I think the problem may be related to this class being an ABC. -# -# For example, GenericDiffusersLoader defines `get_hf_load_class()`, and StableDiffusionDiffusersModel attempts to -# call it. However, the method is not defined in the ABC, so it is not guaranteed to be implemented. - - class ModelLoaderBase(ABC): """Abstract base class for loading models into RAM/VRAM.""" diff --git a/invokeai/backend/model_manager/load/load_default.py b/invokeai/backend/model_manager/load/load_default.py index 3c26a956b76..3fb7a574f31 100644 --- a/invokeai/backend/model_manager/load/load_default.py +++ b/invokeai/backend/model_manager/load/load_default.py @@ -6,7 +6,8 @@ from typing import Optional from invokeai.app.services.config import InvokeAIAppConfig -from invokeai.backend.model_manager.config import AnyModelConfig, DiffusersConfigBase, InvalidModelConfigException +from invokeai.backend.model_manager.configs.base import Diffusers_Config_Base +from invokeai.backend.model_manager.configs.factory import AnyModelConfig from invokeai.backend.model_manager.load.load_base import LoadedModel, ModelLoaderBase from invokeai.backend.model_manager.load.model_cache.cache_record import CacheRecord from invokeai.backend.model_manager.load.model_cache.model_cache import ModelCache, get_model_cache_key @@ -50,7 +51,7 @@ def load_model(self, model_config: AnyModelConfig, submodel_type: Optional[SubMo model_path = self._get_model_path(model_config) if not model_path.exists(): - raise InvalidModelConfigException(f"Files for model '{model_config.name}' not found at {model_path}") + raise FileNotFoundError(f"Files for model '{model_config.name}' not found at {model_path}") with skip_torch_weight_init(): cache_record = self._load_and_cache(model_config, submodel_type) @@ -90,7 +91,7 @@ def get_size_fs( return calc_model_size_by_fs( model_path=model_path, subfolder=submodel_type.value if submodel_type else None, - variant=config.repo_variant if isinstance(config, DiffusersConfigBase) else None, + variant=config.repo_variant if isinstance(config, Diffusers_Config_Base) else None, ) # This needs to be implemented in the subclass diff --git a/invokeai/backend/model_manager/load/model_loader_registry.py b/invokeai/backend/model_manager/load/model_loader_registry.py index ecc4d1fe93b..ca9ea56edbe 100644 --- a/invokeai/backend/model_manager/load/model_loader_registry.py +++ b/invokeai/backend/model_manager/load/model_loader_registry.py @@ -18,10 +18,8 @@ from abc import ABC, abstractmethod from typing import Callable, Dict, Optional, Tuple, Type, TypeVar -from invokeai.backend.model_manager.config import ( - AnyModelConfig, - ModelConfigBase, -) +from invokeai.backend.model_manager.configs.base import Config_Base +from invokeai.backend.model_manager.configs.factory import AnyModelConfig from invokeai.backend.model_manager.load import ModelLoaderBase from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelFormat, ModelType, SubModelType @@ -40,7 +38,7 @@ def register( @abstractmethod def get_implementation( cls, config: AnyModelConfig, submodel_type: Optional[SubModelType] - ) -> Tuple[Type[ModelLoaderBase], ModelConfigBase, Optional[SubModelType]]: + ) -> Tuple[Type[ModelLoaderBase], Config_Base, Optional[SubModelType]]: """ Get subclass of ModelLoaderBase registered to handle base and type. 
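# NOTE: illustrative aside. The concrete implementation in the next hunk looks
# up a loader by a (base, type, format) key, with a fallback for loaders
# registered under BaseModelType.Any. A minimal stand-alone model of that
# lookup; the names here are simplified assumptions, not the real API:
#
#     _registry: dict[str, type] = {}
#
#     def _to_registry_key(base: str, type_: str, format_: str) -> str:
#         return ":".join([base, type_, format_])
#
#     def get_implementation(base: str, type_: str, format_: str) -> type:
#         impl = _registry.get(_to_registry_key(base, type_, format_))
#         if impl is None:
#             # fall back to a loader registered for any base architecture
#             impl = _registry.get(_to_registry_key("any", type_, format_))
#         if impl is None:
#             raise NotImplementedError(f"No loader registered for {base}/{type_}/{format_}")
#         return impl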
@@ -84,7 +82,7 @@ def decorator(subclass: Type[TModelLoader]) -> Type[TModelLoader]: @classmethod def get_implementation( cls, config: AnyModelConfig, submodel_type: Optional[SubModelType] - ) -> Tuple[Type[ModelLoaderBase], ModelConfigBase, Optional[SubModelType]]: + ) -> Tuple[Type[ModelLoaderBase], Config_Base, Optional[SubModelType]]: """Get subclass of ModelLoaderBase registered to handle base and type.""" key1 = cls._to_registry_key(config.base, config.type, config.format) # for a specific base type diff --git a/invokeai/backend/model_manager/load/model_loaders/clip_vision.py b/invokeai/backend/model_manager/load/model_loaders/clip_vision.py index 29d7bc691cf..0150e24248f 100644 --- a/invokeai/backend/model_manager/load/model_loaders/clip_vision.py +++ b/invokeai/backend/model_manager/load/model_loaders/clip_vision.py @@ -3,10 +3,8 @@ from transformers import CLIPVisionModelWithProjection -from invokeai.backend.model_manager.config import ( - AnyModelConfig, - DiffusersConfigBase, -) +from invokeai.backend.model_manager.configs.base import Diffusers_Config_Base +from invokeai.backend.model_manager.configs.factory import AnyModelConfig from invokeai.backend.model_manager.load.load_default import ModelLoader from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry from invokeai.backend.model_manager.taxonomy import AnyModel, BaseModelType, ModelFormat, ModelType, SubModelType @@ -21,7 +19,7 @@ def _load_model( config: AnyModelConfig, submodel_type: Optional[SubModelType] = None, ) -> AnyModel: - if not isinstance(config, DiffusersConfigBase): + if not isinstance(config, Diffusers_Config_Base): raise ValueError("Only DiffusersConfigBase models are currently supported here.") if submodel_type is not None: diff --git a/invokeai/backend/model_manager/load/model_loaders/cogview4.py b/invokeai/backend/model_manager/load/model_loaders/cogview4.py index e7669a33c42..782ff38450c 100644 --- a/invokeai/backend/model_manager/load/model_loaders/cogview4.py +++ b/invokeai/backend/model_manager/load/model_loaders/cogview4.py @@ -3,11 +3,8 @@ import torch -from invokeai.backend.model_manager.config import ( - AnyModelConfig, - CheckpointConfigBase, - DiffusersConfigBase, -) +from invokeai.backend.model_manager.configs.base import Checkpoint_Config_Base, Diffusers_Config_Base +from invokeai.backend.model_manager.configs.factory import AnyModelConfig from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader from invokeai.backend.model_manager.taxonomy import ( @@ -28,7 +25,7 @@ def _load_model( config: AnyModelConfig, submodel_type: Optional[SubModelType] = None, ) -> AnyModel: - if isinstance(config, CheckpointConfigBase): + if isinstance(config, Checkpoint_Config_Base): raise NotImplementedError("CheckpointConfigBase is not implemented for CogView4 models.") if submodel_type is None: @@ -36,7 +33,7 @@ def _load_model( model_path = Path(config.path) load_class = self.get_hf_load_class(model_path, submodel_type) - repo_variant = config.repo_variant if isinstance(config, DiffusersConfigBase) else None + repo_variant = config.repo_variant if isinstance(config, Diffusers_Config_Base) else None variant = repo_variant.value if repo_variant else None model_path = model_path / submodel_type.value diff --git a/invokeai/backend/model_manager/load/model_loaders/controlnet.py b/invokeai/backend/model_manager/load/model_loaders/controlnet.py index 
5bf93db3816..8fd1796b8f5 100644 --- a/invokeai/backend/model_manager/load/model_loaders/controlnet.py +++ b/invokeai/backend/model_manager/load/model_loaders/controlnet.py @@ -5,10 +5,8 @@ from diffusers import ControlNetModel -from invokeai.backend.model_manager.config import ( - AnyModelConfig, - ControlNetCheckpointConfig, -) +from invokeai.backend.model_manager.configs.controlnet import ControlNet_Checkpoint_Config_Base +from invokeai.backend.model_manager.configs.factory import AnyModelConfig from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader from invokeai.backend.model_manager.taxonomy import ( @@ -46,7 +44,7 @@ def _load_model( config: AnyModelConfig, submodel_type: Optional[SubModelType] = None, ) -> AnyModel: - if isinstance(config, ControlNetCheckpointConfig): + if isinstance(config, ControlNet_Checkpoint_Config_Base): return ControlNetModel.from_single_file( config.path, torch_dtype=self._torch_dtype, diff --git a/invokeai/backend/model_manager/load/model_loaders/flux.py b/invokeai/backend/model_manager/load/model_loaders/flux.py index 6ea7b539252..e44ddec382c 100644 --- a/invokeai/backend/model_manager/load/model_loaders/flux.py +++ b/invokeai/backend/model_manager/load/model_loaders/flux.py @@ -33,27 +33,29 @@ from invokeai.backend.flux.model import Flux from invokeai.backend.flux.modules.autoencoder import AutoEncoder from invokeai.backend.flux.redux.flux_redux_model import FluxReduxModel -from invokeai.backend.flux.util import ae_params, params -from invokeai.backend.model_manager.config import ( - AnyModelConfig, - CheckpointConfigBase, - CLIPEmbedDiffusersConfig, - ControlNetCheckpointConfig, - ControlNetDiffusersConfig, - FluxReduxConfig, - IPAdapterCheckpointConfig, - MainBnbQuantized4bCheckpointConfig, - MainCheckpointConfig, - MainGGUFCheckpointConfig, - T5EncoderBnbQuantizedLlmInt8bConfig, - T5EncoderConfig, - VAECheckpointConfig, +from invokeai.backend.flux.util import get_flux_ae_params, get_flux_transformers_params +from invokeai.backend.model_manager.configs.base import Checkpoint_Config_Base +from invokeai.backend.model_manager.configs.clip_embed import CLIPEmbed_Diffusers_Config_Base +from invokeai.backend.model_manager.configs.controlnet import ( + ControlNet_Checkpoint_Config_Base, + ControlNet_Diffusers_Config_Base, ) +from invokeai.backend.model_manager.configs.factory import AnyModelConfig +from invokeai.backend.model_manager.configs.flux_redux import FLUXRedux_Checkpoint_Config +from invokeai.backend.model_manager.configs.ip_adapter import IPAdapter_Checkpoint_Config_Base +from invokeai.backend.model_manager.configs.main import ( + Main_BnBNF4_FLUX_Config, + Main_Checkpoint_FLUX_Config, + Main_GGUF_FLUX_Config, +) +from invokeai.backend.model_manager.configs.t5_encoder import T5Encoder_BnBLLMint8_Config, T5Encoder_T5Encoder_Config +from invokeai.backend.model_manager.configs.vae import VAE_Checkpoint_Config_Base from invokeai.backend.model_manager.load.load_default import ModelLoader from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry from invokeai.backend.model_manager.taxonomy import ( AnyModel, BaseModelType, + FluxVariantType, ModelFormat, ModelType, SubModelType, @@ -85,12 +87,12 @@ def _load_model( config: AnyModelConfig, submodel_type: Optional[SubModelType] = None, ) -> AnyModel: - if not isinstance(config, VAECheckpointConfig): + if not isinstance(config, 
VAE_Checkpoint_Config_Base): raise ValueError("Only VAECheckpointConfig models are currently supported here.") model_path = Path(config.path) with accelerate.init_empty_weights(): - model = AutoEncoder(ae_params[config.config_path]) + model = AutoEncoder(get_flux_ae_params()) sd = load_file(model_path) model.load_state_dict(sd, assign=True) # VAE is broken in float16, which mps defaults to @@ -107,7 +109,7 @@ def _load_model( @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.CLIPEmbed, format=ModelFormat.Diffusers) -class ClipCheckpointModel(ModelLoader): +class CLIPDiffusersLoader(ModelLoader): """Class to load main models.""" def _load_model( @@ -115,7 +117,7 @@ def _load_model( config: AnyModelConfig, submodel_type: Optional[SubModelType] = None, ) -> AnyModel: - if not isinstance(config, CLIPEmbedDiffusersConfig): + if not isinstance(config, CLIPEmbed_Diffusers_Config_Base): raise ValueError("Only CLIPEmbedDiffusersConfig models are currently supported here.") match submodel_type: @@ -138,7 +140,7 @@ def _load_model( config: AnyModelConfig, submodel_type: Optional[SubModelType] = None, ) -> AnyModel: - if not isinstance(config, T5EncoderBnbQuantizedLlmInt8bConfig): + if not isinstance(config, T5Encoder_BnBLLMint8_Config): raise ValueError("Only T5EncoderBnbQuantizedLlmInt8bConfig models are currently supported here.") if not bnb_available: raise ImportError( @@ -185,7 +187,7 @@ def _load_model( config: AnyModelConfig, submodel_type: Optional[SubModelType] = None, ) -> AnyModel: - if not isinstance(config, T5EncoderConfig): + if not isinstance(config, T5Encoder_T5Encoder_Config): raise ValueError("Only T5EncoderConfig models are currently supported here.") match submodel_type: @@ -210,7 +212,7 @@ def _load_model( config: AnyModelConfig, submodel_type: Optional[SubModelType] = None, ) -> AnyModel: - if not isinstance(config, CheckpointConfigBase): + if not isinstance(config, Checkpoint_Config_Base): raise ValueError("Only CheckpointConfigBase models are currently supported here.") match submodel_type: @@ -225,11 +227,11 @@ def _load_from_singlefile( self, config: AnyModelConfig, ) -> AnyModel: - assert isinstance(config, MainCheckpointConfig) + assert isinstance(config, Main_Checkpoint_FLUX_Config) model_path = Path(config.path) with accelerate.init_empty_weights(): - model = Flux(params[config.config_path]) + model = Flux(get_flux_transformers_params(config.variant)) sd = load_file(model_path) if "model.diffusion_model.double_blocks.0.img_attn.norm.key_norm.scale" in sd: @@ -252,7 +254,7 @@ def _load_model( config: AnyModelConfig, submodel_type: Optional[SubModelType] = None, ) -> AnyModel: - if not isinstance(config, CheckpointConfigBase): + if not isinstance(config, Checkpoint_Config_Base): raise ValueError("Only CheckpointConfigBase models are currently supported here.") match submodel_type: @@ -267,11 +269,11 @@ def _load_from_singlefile( self, config: AnyModelConfig, ) -> AnyModel: - assert isinstance(config, MainGGUFCheckpointConfig) + assert isinstance(config, Main_GGUF_FLUX_Config) model_path = Path(config.path) with accelerate.init_empty_weights(): - model = Flux(params[config.config_path]) + model = Flux(get_flux_transformers_params(config.variant)) # HACK(ryand): We shouldn't be hard-coding the compute_dtype here. 
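# NOTE: illustrative aside. The loaders in this file share one pattern:
# construct the module under accelerate.init_empty_weights() so its parameters
# live on the meta device (no memory is allocated), then materialize them with
# load_state_dict(..., assign=True). A self-contained toy example of the same
# pattern (names are hypothetical):
#
#     import accelerate
#     import torch
#
#     with accelerate.init_empty_weights():
#         layer = torch.nn.Linear(8, 8)  # parameters allocated on the meta device
#
#     sd = {"weight": torch.zeros(8, 8), "bias": torch.zeros(8)}
#     layer.load_state_dict(sd, assign=True)  # assign=True adopts the real tensors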
sd = gguf_sd_loader(model_path, compute_dtype=torch.bfloat16) @@ -298,7 +300,7 @@ def _load_model( config: AnyModelConfig, submodel_type: Optional[SubModelType] = None, ) -> AnyModel: - if not isinstance(config, CheckpointConfigBase): + if not isinstance(config, Checkpoint_Config_Base): raise ValueError("Only CheckpointConfigBase models are currently supported here.") match submodel_type: @@ -313,7 +315,7 @@ def _load_from_singlefile( self, config: AnyModelConfig, ) -> AnyModel: - assert isinstance(config, MainBnbQuantized4bCheckpointConfig) + assert isinstance(config, Main_BnBNF4_FLUX_Config) if not bnb_available: raise ImportError( "The bnb modules are not available. Please install bitsandbytes if available on your platform." @@ -322,7 +324,7 @@ def _load_from_singlefile( with SilenceWarnings(): with accelerate.init_empty_weights(): - model = Flux(params[config.config_path]) + model = Flux(get_flux_transformers_params(config.variant)) model = quantize_model_nf4(model, modules_to_not_convert=set(), compute_dtype=torch.bfloat16) sd = load_file(model_path) if "model.diffusion_model.double_blocks.0.img_attn.norm.key_norm.scale" in sd: @@ -341,9 +343,9 @@ def _load_model( config: AnyModelConfig, submodel_type: Optional[SubModelType] = None, ) -> AnyModel: - if isinstance(config, ControlNetCheckpointConfig): + if isinstance(config, ControlNet_Checkpoint_Config_Base): model_path = Path(config.path) - elif isinstance(config, ControlNetDiffusersConfig): + elif isinstance(config, ControlNet_Diffusers_Config_Base): # If this is a diffusers directory, we simply ignore the config file and load from the weight file. model_path = Path(config.path) / "diffusion_pytorch_model.safetensors" else: @@ -362,7 +364,7 @@ def _load_model( def _load_xlabs_controlnet(self, sd: dict[str, torch.Tensor]) -> AnyModel: with accelerate.init_empty_weights(): # HACK(ryand): Is it safe to assume dev here? 
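# NOTE: illustrative aside. XLabs checkpoints do not record which FLUX variant
# they were trained against, so the replacement line below pins
# FluxVariantType.Dev. A plausible shape for the new helper, stated purely as
# an assumption (the real implementation lives in invokeai.backend.flux.util):
#
#     def get_flux_transformers_params(variant: FluxVariantType) -> FluxParams:
#         name = "flux-schnell" if variant is FluxVariantType.Schnell else "flux-dev"
#         return params[name]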
- model = XLabsControlNetFlux(params["flux-dev"]) + model = XLabsControlNetFlux(get_flux_transformers_params(FluxVariantType.Dev)) model.load_state_dict(sd, assign=True) return model @@ -388,7 +390,7 @@ def _load_model( config: AnyModelConfig, submodel_type: Optional[SubModelType] = None, ) -> AnyModel: - if not isinstance(config, IPAdapterCheckpointConfig): + if not isinstance(config, IPAdapter_Checkpoint_Config_Base): raise ValueError(f"Unexpected model config type: {type(config)}.") sd = load_file(Path(config.path)) @@ -411,7 +413,7 @@ def _load_model( config: AnyModelConfig, submodel_type: Optional[SubModelType] = None, ) -> AnyModel: - if not isinstance(config, FluxReduxConfig): + if not isinstance(config, FLUXRedux_Checkpoint_Config): raise ValueError(f"Unexpected model config type: {type(config)}.") sd = load_file(Path(config.path)) diff --git a/invokeai/backend/model_manager/load/model_loaders/generic_diffusers.py b/invokeai/backend/model_manager/load/model_loaders/generic_diffusers.py index 8a690583d5d..b888c69edf9 100644 --- a/invokeai/backend/model_manager/load/model_loaders/generic_diffusers.py +++ b/invokeai/backend/model_manager/load/model_loaders/generic_diffusers.py @@ -8,7 +8,8 @@ from diffusers.configuration_utils import ConfigMixin from diffusers.models.modeling_utils import ModelMixin -from invokeai.backend.model_manager.config import AnyModelConfig, DiffusersConfigBase, InvalidModelConfigException +from invokeai.backend.model_manager.configs.base import Diffusers_Config_Base +from invokeai.backend.model_manager.configs.factory import AnyModelConfig from invokeai.backend.model_manager.load.load_default import ModelLoader from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry from invokeai.backend.model_manager.taxonomy import ( @@ -33,7 +34,7 @@ def _load_model( model_class = self.get_hf_load_class(model_path) if submodel_type is not None: raise Exception(f"There are no submodels in models of type {model_class}") - repo_variant = config.repo_variant if isinstance(config, DiffusersConfigBase) else None + repo_variant = config.repo_variant if isinstance(config, Diffusers_Config_Base) else None variant = repo_variant.value if repo_variant else None try: result: AnyModel = model_class.from_pretrained(model_path, torch_dtype=self._torch_dtype, variant=variant) @@ -56,9 +57,7 @@ def get_hf_load_class(self, model_path: Path, submodel_type: Optional[SubModelTy module, class_name = config[submodel_type.value] result = self._hf_definition_to_type(module=module, class_name=class_name) except KeyError as e: - raise InvalidModelConfigException( - f'The "{submodel_type}" submodel is not available for this model.' 
- ) from e + raise ValueError(f'The "{submodel_type}" submodel is not available for this model.') from e else: try: config = self._load_diffusers_config(model_path, config_name="config.json") @@ -67,9 +66,9 @@ def get_hf_load_class(self, model_path: Path, submodel_type: Optional[SubModelTy elif class_name := config.get("architectures"): result = self._hf_definition_to_type(module="transformers", class_name=class_name[0]) else: - raise InvalidModelConfigException("Unable to decipher Load Class based on given config.json") + raise RuntimeError("Unable to decipher Load Class based on given config.json") except KeyError as e: - raise InvalidModelConfigException("An expected config.json file is missing from this model.") from e + raise ValueError("An expected config.json file is missing from this model.") from e assert result is not None return result diff --git a/invokeai/backend/model_manager/load/model_loaders/ip_adapter.py b/invokeai/backend/model_manager/load/model_loaders/ip_adapter.py index d103bc5dbcb..d133a36498c 100644 --- a/invokeai/backend/model_manager/load/model_loaders/ip_adapter.py +++ b/invokeai/backend/model_manager/load/model_loaders/ip_adapter.py @@ -7,7 +7,7 @@ import torch from invokeai.backend.ip_adapter.ip_adapter import build_ip_adapter -from invokeai.backend.model_manager.config import AnyModelConfig +from invokeai.backend.model_manager.configs.factory import AnyModelConfig from invokeai.backend.model_manager.load import ModelLoader, ModelLoaderRegistry from invokeai.backend.model_manager.taxonomy import AnyModel, BaseModelType, ModelFormat, ModelType, SubModelType from invokeai.backend.raw_model import RawModel diff --git a/invokeai/backend/model_manager/load/model_loaders/llava_onevision.py b/invokeai/backend/model_manager/load/model_loaders/llava_onevision.py index b508137f814..e459bbf2bb1 100644 --- a/invokeai/backend/model_manager/load/model_loaders/llava_onevision.py +++ b/invokeai/backend/model_manager/load/model_loaders/llava_onevision.py @@ -3,9 +3,7 @@ from transformers import LlavaOnevisionForConditionalGeneration -from invokeai.backend.model_manager.config import ( - AnyModelConfig, -) +from invokeai.backend.model_manager.configs.factory import AnyModelConfig from invokeai.backend.model_manager.load.load_default import ModelLoader from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry from invokeai.backend.model_manager.taxonomy import AnyModel, BaseModelType, ModelFormat, ModelType, SubModelType diff --git a/invokeai/backend/model_manager/load/model_loaders/lora.py b/invokeai/backend/model_manager/load/model_loaders/lora.py index 98f54224fad..29fb815d541 100644 --- a/invokeai/backend/model_manager/load/model_loaders/lora.py +++ b/invokeai/backend/model_manager/load/model_loaders/lora.py @@ -9,7 +9,7 @@ from safetensors.torch import load_file from invokeai.app.services.config import InvokeAIAppConfig -from invokeai.backend.model_manager.config import AnyModelConfig +from invokeai.backend.model_manager.configs.factory import AnyModelConfig from invokeai.backend.model_manager.load.load_default import ModelLoader from invokeai.backend.model_manager.load.model_cache.model_cache import ModelCache from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry diff --git a/invokeai/backend/model_manager/load/model_loaders/onnx.py b/invokeai/backend/model_manager/load/model_loaders/onnx.py index 3078d622b4e..a565bb11d05 100644 --- a/invokeai/backend/model_manager/load/model_loaders/onnx.py +++ 
b/invokeai/backend/model_manager/load/model_loaders/onnx.py @@ -5,7 +5,7 @@ from pathlib import Path from typing import Optional -from invokeai.backend.model_manager.config import AnyModelConfig +from invokeai.backend.model_manager.configs.factory import AnyModelConfig from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader from invokeai.backend.model_manager.taxonomy import ( diff --git a/invokeai/backend/model_manager/load/model_loaders/sig_lip.py b/invokeai/backend/model_manager/load/model_loaders/sig_lip.py index bdf38887a3a..16b8e6c88da 100644 --- a/invokeai/backend/model_manager/load/model_loaders/sig_lip.py +++ b/invokeai/backend/model_manager/load/model_loaders/sig_lip.py @@ -3,9 +3,7 @@ from transformers import SiglipVisionModel -from invokeai.backend.model_manager.config import ( - AnyModelConfig, -) +from invokeai.backend.model_manager.configs.factory import AnyModelConfig from invokeai.backend.model_manager.load.load_default import ModelLoader from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry from invokeai.backend.model_manager.taxonomy import AnyModel, BaseModelType, ModelFormat, ModelType, SubModelType diff --git a/invokeai/backend/model_manager/load/model_loaders/spandrel_image_to_image.py b/invokeai/backend/model_manager/load/model_loaders/spandrel_image_to_image.py index 44cb0277fc4..e6d8f429904 100644 --- a/invokeai/backend/model_manager/load/model_loaders/spandrel_image_to_image.py +++ b/invokeai/backend/model_manager/load/model_loaders/spandrel_image_to_image.py @@ -3,9 +3,7 @@ import torch -from invokeai.backend.model_manager.config import ( - AnyModelConfig, -) +from invokeai.backend.model_manager.configs.factory import AnyModelConfig from invokeai.backend.model_manager.load.load_default import ModelLoader from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry from invokeai.backend.model_manager.taxonomy import AnyModel, BaseModelType, ModelFormat, ModelType, SubModelType diff --git a/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py b/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py index aa692478cad..d0cc5893796 100644 --- a/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py +++ b/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py @@ -4,18 +4,24 @@ from pathlib import Path from typing import Optional -from diffusers import ( - StableDiffusionInpaintPipeline, - StableDiffusionPipeline, +from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipeline +from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline +from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl import StableDiffusionXLPipeline +from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_inpaint import ( StableDiffusionXLInpaintPipeline, - StableDiffusionXLPipeline, ) -from invokeai.backend.model_manager.config import ( - AnyModelConfig, - CheckpointConfigBase, - DiffusersConfigBase, - MainCheckpointConfig, +from invokeai.backend.model_manager.configs.base import Checkpoint_Config_Base, Diffusers_Config_Base +from invokeai.backend.model_manager.configs.factory import AnyModelConfig +from invokeai.backend.model_manager.configs.main import ( + Main_Checkpoint_SD1_Config, + 
Main_Checkpoint_SD2_Config, + Main_Checkpoint_SDXL_Config, + Main_Checkpoint_SDXLRefiner_Config, + Main_Diffusers_SD1_Config, + Main_Diffusers_SD2_Config, + Main_Diffusers_SDXL_Config, + Main_Diffusers_SDXLRefiner_Config, ) from invokeai.backend.model_manager.load.model_cache.model_cache import get_model_cache_key from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry @@ -58,7 +64,7 @@ def _load_model( config: AnyModelConfig, submodel_type: Optional[SubModelType] = None, ) -> AnyModel: - if isinstance(config, CheckpointConfigBase): + if isinstance(config, Checkpoint_Config_Base): return self._load_from_singlefile(config, submodel_type) if submodel_type is None: @@ -66,7 +72,7 @@ def _load_model( model_path = Path(config.path) load_class = self.get_hf_load_class(model_path, submodel_type) - repo_variant = config.repo_variant if isinstance(config, DiffusersConfigBase) else None + repo_variant = config.repo_variant if isinstance(config, Diffusers_Config_Base) else None variant = repo_variant.value if repo_variant else None model_path = model_path / submodel_type.value try: @@ -107,7 +113,19 @@ def _load_from_singlefile( ModelVariantType.Normal: StableDiffusionXLPipeline, }, } - assert isinstance(config, MainCheckpointConfig) + assert isinstance( + config, + ( + Main_Diffusers_SD1_Config, + Main_Diffusers_SD2_Config, + Main_Diffusers_SDXL_Config, + Main_Diffusers_SDXLRefiner_Config, + Main_Checkpoint_SD1_Config, + Main_Checkpoint_SD2_Config, + Main_Checkpoint_SDXL_Config, + Main_Checkpoint_SDXLRefiner_Config, + ), + ) try: load_class = load_classes[config.base][config.variant] except KeyError as e: diff --git a/invokeai/backend/model_manager/load/model_loaders/textual_inversion.py b/invokeai/backend/model_manager/load/model_loaders/textual_inversion.py index 60ae4ea08b7..2d0411a8df2 100644 --- a/invokeai/backend/model_manager/load/model_loaders/textual_inversion.py +++ b/invokeai/backend/model_manager/load/model_loaders/textual_inversion.py @@ -4,7 +4,7 @@ from pathlib import Path from typing import Optional -from invokeai.backend.model_manager.config import AnyModelConfig +from invokeai.backend.model_manager.configs.factory import AnyModelConfig from invokeai.backend.model_manager.load.load_default import ModelLoader from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry from invokeai.backend.model_manager.taxonomy import ( diff --git a/invokeai/backend/model_manager/load/model_loaders/vae.py b/invokeai/backend/model_manager/load/model_loaders/vae.py index 365fa0a547c..e91903ccdad 100644 --- a/invokeai/backend/model_manager/load/model_loaders/vae.py +++ b/invokeai/backend/model_manager/load/model_loaders/vae.py @@ -3,9 +3,10 @@ from typing import Optional -from diffusers import AutoencoderKL +from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL -from invokeai.backend.model_manager.config import AnyModelConfig, VAECheckpointConfig +from invokeai.backend.model_manager.configs.factory import AnyModelConfig +from invokeai.backend.model_manager.configs.vae import VAE_Checkpoint_Config_Base from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader from invokeai.backend.model_manager.taxonomy import ( @@ -27,7 +28,7 @@ def _load_model( config: AnyModelConfig, submodel_type: Optional[SubModelType] = None, ) -> AnyModel: - if isinstance(config, VAECheckpointConfig): + if 
isinstance(config, VAE_Checkpoint_Config_Base): return AutoencoderKL.from_single_file( config.path, torch_dtype=self._torch_dtype, diff --git a/invokeai/backend/model_manager/merge.py b/invokeai/backend/model_manager/merge.py deleted file mode 100644 index 03056b10f59..00000000000 --- a/invokeai/backend/model_manager/merge.py +++ /dev/null @@ -1,163 +0,0 @@ -""" -invokeai.backend.model_manager.merge exports: -merge_diffusion_models() -- combine multiple models by location and return a pipeline object -merge_diffusion_models_and_commit() -- combine multiple models by ModelManager ID and write to the models tables - -Copyright (c) 2023 Lincoln Stein and the InvokeAI Development Team -""" - -import warnings -from enum import Enum -from pathlib import Path -from typing import Any, List, Optional, Set - -import torch -from diffusers import AutoPipelineForText2Image -from diffusers.utils import logging as dlogging - -from invokeai.app.services.model_install import ModelInstallServiceBase -from invokeai.app.services.model_records.model_records_base import ModelRecordChanges -from invokeai.backend.model_manager import AnyModelConfig, BaseModelType, ModelType, ModelVariantType -from invokeai.backend.model_manager.config import MainDiffusersConfig -from invokeai.backend.util.devices import TorchDevice - - -class MergeInterpolationMethod(str, Enum): - WeightedSum = "weighted_sum" - Sigmoid = "sigmoid" - InvSigmoid = "inv_sigmoid" - AddDifference = "add_difference" - - -class ModelMerger(object): - """Wrapper class for model merge function.""" - - def __init__(self, installer: ModelInstallServiceBase): - """ - Initialize a ModelMerger object with the model installer. - """ - self._installer = installer - self._dtype = TorchDevice.choose_torch_dtype() - - def merge_diffusion_models( - self, - model_paths: List[Path], - alpha: float = 0.5, - interp: Optional[MergeInterpolationMethod] = None, - force: bool = False, - variant: Optional[str] = None, - **kwargs: Any, - ) -> Any: # pipe.merge is an untyped function. - """ - :param model_paths: up to three models, designated by their local paths or HuggingFace repo_ids - :param alpha: The interpolation parameter. Ranges from 0 to 1. It affects the ratio in which the checkpoints are merged. A 0.8 alpha - would mean that the first model checkpoints would affect the final result far less than an alpha of 0.2 - :param interp: The interpolation method to use for the merging. Supports "sigmoid", "inv_sigmoid", "add_difference" and None. - Passing None uses the default interpolation which is weighted sum interpolation. For merging three checkpoints, only "add_difference" is supported. - :param force: Whether to ignore mismatch in model_config.json for the current models. Defaults to False. - - **kwargs - the default DiffusionPipeline.get_config_dict kwargs: - cache_dir, resume_download, force_download, proxies, local_files_only, use_auth_token, revision, torch_dtype, device_map - """ - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - verbosity = dlogging.get_verbosity() - dlogging.set_verbosity_error() - dtype = torch.float16 if variant == "fp16" else self._dtype - - # Note that checkpoint_merger will not work with downloaded HuggingFace fp16 models - # until upstream https://github.com/huggingface/diffusers/pull/6670 is merged and released. 
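# NOTE: illustrative aside, not part of the removed module. The default
# "weighted_sum" interpolation described in the docstring above reduces to a
# per-tensor linear interpolation over the checkpoints' state dicts:
#
#     import torch
#
#     def weighted_sum(sd_a: dict, sd_b: dict, alpha: float = 0.5) -> dict:
#         # alpha weights the second model: 0.0 keeps A unchanged, 1.0 yields B.
#         return {k: torch.lerp(sd_a[k].float(), sd_b[k].float(), alpha) for k in sd_a}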
- pipe = AutoPipelineForText2Image.from_pretrained( - model_paths[0], - custom_pipeline="checkpoint_merger", - torch_dtype=dtype, - variant=variant, - ) # type: ignore - merged_pipe = pipe.merge( - pretrained_model_name_or_path_list=model_paths, - alpha=alpha, - interp=interp.value if interp else None, # diffusers API treats None as "weighted sum" - force=force, - torch_dtype=dtype, - variant=variant, - **kwargs, - ) - dlogging.set_verbosity(verbosity) - return merged_pipe - - def merge_diffusion_models_and_save( - self, - model_keys: List[str], - merged_model_name: str, - alpha: float = 0.5, - force: bool = False, - interp: Optional[MergeInterpolationMethod] = None, - merge_dest_directory: Optional[Path] = None, - variant: Optional[str] = None, - **kwargs: Any, - ) -> AnyModelConfig: - """ - :param models: up to three models, designated by their registered InvokeAI model name - :param merged_model_name: name for new model - :param alpha: The interpolation parameter. Ranges from 0 to 1. It affects the ratio in which the checkpoints are merged. A 0.8 alpha - would mean that the first model checkpoints would affect the final result far less than an alpha of 0.2 - :param interp: The interpolation method to use for the merging. Supports "weighted_average", "sigmoid", "inv_sigmoid", "add_difference" and None. - Passing None uses the default interpolation which is weighted sum interpolation. For merging three checkpoints, only "add_difference" is supported. Add_difference is A+(B-C). - :param force: Whether to ignore mismatch in model_config.json for the current models. Defaults to False. - :param merge_dest_directory: Save the merged model to the designated directory (with 'merged_model_name' appended) - **kwargs - the default DiffusionPipeline.get_config_dict kwargs: - cache_dir, resume_download, force_download, proxies, local_files_only, use_auth_token, revision, torch_dtype, device_map - """ - model_paths: List[Path] = [] - model_names: List[str] = [] - config = self._installer.app_config - store = self._installer.record_store - base_models: Set[BaseModelType] = set() - variant = None if self._installer.app_config.precision == "float32" else "fp16" - - assert len(model_keys) <= 2 or interp == MergeInterpolationMethod.AddDifference, ( - "When merging three models, only the 'add_difference' merge method is supported" - ) - - for key in model_keys: - info = store.get_model(key) - model_names.append(info.name) - assert isinstance(info, MainDiffusersConfig), ( - f"{info.name} ({info.key}) is not a diffusers model. 
It must be optimized before merging" - ) - assert info.variant == ModelVariantType("normal"), ( - f"{info.name} ({info.key}) is a {info.variant} model, which cannot currently be merged" - ) - - # tally base models used - base_models.add(info.base) - model_paths.extend([config.models_path / info.path]) - - assert len(base_models) == 1, f"All models to merge must have same base model, but found bases {base_models}" - base_model = base_models.pop() - - merge_method = None if interp == "weighted_sum" else MergeInterpolationMethod(interp) - merged_pipe = self.merge_diffusion_models(model_paths, alpha, merge_method, force, variant=variant, **kwargs) - dump_path = ( - Path(merge_dest_directory) - if merge_dest_directory - else config.models_path / base_model.value / ModelType.Main.value - ) - dump_path.mkdir(parents=True, exist_ok=True) - dump_path = dump_path / merged_model_name - - dtype = torch.float16 if variant == "fp16" else self._dtype - merged_pipe.save_pretrained(dump_path.as_posix(), safe_serialization=True, torch_dtype=dtype, variant=variant) - - # register model and get its unique key - key = self._installer.register_path(dump_path) - - # update model's config - model_config = self._installer.record_store.get_model(key) - model_config.name = merged_model_name - model_config.description = f"Merge of models {', '.join(model_names)}" - - self._installer.record_store.update_model( - key, ModelRecordChanges(name=model_config.name, description=model_config.description) - ) - return model_config diff --git a/invokeai/backend/model_manager/model_on_disk.py b/invokeai/backend/model_manager/model_on_disk.py index 502ca596a62..a86e94d3a4c 100644 --- a/invokeai/backend/model_manager/model_on_disk.py +++ b/invokeai/backend/model_manager/model_on_disk.py @@ -30,7 +30,8 @@ def __init__(self, path: Path, hash_algo: HASHING_ALGORITHMS = "blake3_single"): self.hash_algo = hash_algo # Having a cache helps users of ModelOnDisk (i.e. 
configs) to save state # This prevents redundant computations during matching and parsing - self.cache = {"_CACHED_STATE_DICTS": {}} + self._state_dict_cache: dict[Path, Any] = {} + self._metadata_cache: dict[Path, Any] = {} def hash(self) -> str: return ModelHash(algorithm=self.hash_algo).hash(self.path) @@ -47,13 +48,18 @@ def weight_files(self) -> set[Path]: return {f for f in self.path.rglob("*") if f.suffix in extensions} def metadata(self, path: Optional[Path] = None) -> dict[str, str]: + path = path or self.path + if path in self._metadata_cache: + return self._metadata_cache[path] try: with safe_open(self.path, framework="pt", device="cpu") as f: metadata = f.metadata() assert isinstance(metadata, dict) - return metadata except Exception: - return {} + metadata = {} + + self._metadata_cache[path] = metadata + return metadata def repo_variant(self) -> Optional[ModelRepoVariant]: if self.path.is_file(): @@ -73,10 +79,8 @@ def repo_variant(self) -> Optional[ModelRepoVariant]: return ModelRepoVariant.Default def load_state_dict(self, path: Optional[Path] = None) -> StateDict: - sd_cache = self.cache["_CACHED_STATE_DICTS"] - - if path in sd_cache: - return sd_cache[path] + if path in self._state_dict_cache: + return self._state_dict_cache[path] path = self.resolve_weight_file(path) @@ -111,7 +115,7 @@ def load_state_dict(self, path: Optional[Path] = None) -> StateDict: raise ValueError(f"Unrecognized model extension: {path.suffix}") state_dict = checkpoint.get("state_dict", checkpoint) - sd_cache[path] = state_dict + self._state_dict_cache[path] = state_dict return state_dict def resolve_weight_file(self, path: Optional[Path] = None) -> Path: diff --git a/invokeai/backend/model_manager/single_file_config_files.py b/invokeai/backend/model_manager/single_file_config_files.py new file mode 100644 index 00000000000..fa4b9e934b8 --- /dev/null +++ b/invokeai/backend/model_manager/single_file_config_files.py @@ -0,0 +1,93 @@ +from dataclasses import dataclass + +from invokeai.backend.model_manager.configs.factory import AnyModelConfig +from invokeai.backend.model_manager.taxonomy import ( + BaseModelType, + ModelType, + ModelVariantType, + SchedulerPredictionType, +) + + +@dataclass(frozen=True) +class LegacyConfigKey: + type: ModelType + base: BaseModelType + variant: ModelVariantType | None = None + pred: SchedulerPredictionType | None = None + + @classmethod + def from_model_config(cls, config: AnyModelConfig) -> "LegacyConfigKey": + variant = getattr(config, "variant", None) + pred = getattr(config, "prediction_type", None) + return cls(type=config.type, base=config.base, variant=variant, pred=pred) + + +LEGACY_CONFIG_MAP: dict[LegacyConfigKey, str] = { + LegacyConfigKey( + ModelType.Main, + BaseModelType.StableDiffusion1, + ModelVariantType.Normal, + SchedulerPredictionType.Epsilon, + ): "stable-diffusion/v1-inference.yaml", + LegacyConfigKey( + ModelType.Main, + BaseModelType.StableDiffusion1, + ModelVariantType.Normal, + SchedulerPredictionType.VPrediction, + ): "stable-diffusion/v1-inference-v.yaml", + LegacyConfigKey( + ModelType.Main, + BaseModelType.StableDiffusion1, + ModelVariantType.Inpaint, + ): "stable-diffusion/v1-inpainting-inference.yaml", + LegacyConfigKey( + ModelType.Main, + BaseModelType.StableDiffusion2, + ModelVariantType.Normal, + SchedulerPredictionType.Epsilon, + ): "stable-diffusion/v2-inference.yaml", + LegacyConfigKey( + ModelType.Main, + BaseModelType.StableDiffusion2, + ModelVariantType.Normal, + SchedulerPredictionType.VPrediction, + ): 
"stable-diffusion/v2-inference-v.yaml", + LegacyConfigKey( + ModelType.Main, + BaseModelType.StableDiffusion2, + ModelVariantType.Inpaint, + SchedulerPredictionType.Epsilon, + ): "stable-diffusion/v2-inpainting-inference.yaml", + LegacyConfigKey( + ModelType.Main, + BaseModelType.StableDiffusion2, + ModelVariantType.Inpaint, + SchedulerPredictionType.VPrediction, + ): "stable-diffusion/v2-inpainting-inference-v.yaml", + LegacyConfigKey( + ModelType.Main, + BaseModelType.StableDiffusion2, + ModelVariantType.Depth, + ): "stable-diffusion/v2-midas-inference.yaml", + LegacyConfigKey( + ModelType.Main, + BaseModelType.StableDiffusionXL, + ModelVariantType.Normal, + ): "stable-diffusion/sd_xl_base.yaml", + LegacyConfigKey( + ModelType.Main, + BaseModelType.StableDiffusionXL, + ModelVariantType.Inpaint, + ): "stable-diffusion/sd_xl_inpaint.yaml", + LegacyConfigKey( + ModelType.Main, + BaseModelType.StableDiffusionXLRefiner, + ModelVariantType.Normal, + ): "stable-diffusion/sd_xl_refiner.yaml", + LegacyConfigKey(ModelType.ControlNet, BaseModelType.StableDiffusion1): "controlnet/cldm_v15.yaml", + LegacyConfigKey(ModelType.ControlNet, BaseModelType.StableDiffusion2): "controlnet/cldm_v21.yaml", + LegacyConfigKey(ModelType.VAE, BaseModelType.StableDiffusion1): "stable-diffusion/v1-inference.yaml", + LegacyConfigKey(ModelType.VAE, BaseModelType.StableDiffusion2): "stable-diffusion/v2-inference.yaml", + LegacyConfigKey(ModelType.VAE, BaseModelType.StableDiffusionXL): "stable-diffusion/sd_xl_base.yaml", +} diff --git a/invokeai/backend/model_manager/taxonomy.py b/invokeai/backend/model_manager/taxonomy.py index 07f8c8f5def..99a31f438d1 100644 --- a/invokeai/backend/model_manager/taxonomy.py +++ b/invokeai/backend/model_manager/taxonomy.py @@ -1,39 +1,70 @@ from enum import Enum from typing import Dict, TypeAlias, Union -import diffusers import onnxruntime as ort import torch -from diffusers import ModelMixin +from diffusers.models.modeling_utils import ModelMixin +from diffusers.pipelines.pipeline_utils import DiffusionPipeline +from pydantic import TypeAdapter from invokeai.backend.raw_model import RawModel # ModelMixin is the base class for all diffusers and transformers models # RawModel is the InvokeAI wrapper class for ip_adapters, loras, textual_inversion and onnx runtime -AnyModel = Union[ - ModelMixin, RawModel, torch.nn.Module, Dict[str, torch.Tensor], diffusers.DiffusionPipeline, ort.InferenceSession +AnyModel: TypeAlias = Union[ + ModelMixin, + RawModel, + torch.nn.Module, + Dict[str, torch.Tensor], + DiffusionPipeline, + ort.InferenceSession, ] +"""Type alias for any kind of runtime, in-memory model representation. For example, a torch module or diffusers pipeline.""" class BaseModelType(str, Enum): - """Base model type.""" + """An enumeration of base model architectures. For example, Stable Diffusion 1.x, Stable Diffusion 2.x, FLUX, etc. + + Every model config must have a base architecture type. + + Not all models are associated with a base architecture. For example, CLIP models are their own thing, not related + to any particular model architecture. To simplify internal APIs and make it easier to work with models, we use a + fallback/null value `BaseModelType.Any` for these models, instead of making the model base optional.""" Any = "any" + """`Any` is essentially a fallback/null value for models with no base architecture association. 
+ For example, CLIP models are not related to Stable Diffusion, FLUX, or any other model architecture.""" StableDiffusion1 = "sd-1" + """Indicates the model is associated with the Stable Diffusion 1.x model architecture, including 1.4 and 1.5.""" StableDiffusion2 = "sd-2" + """Indicates the model is associated with the Stable Diffusion 2.x model architecture, including 2.0 and 2.1.""" StableDiffusion3 = "sd-3" + """Indicates the model is associated with the Stable Diffusion 3.5 model architecture.""" StableDiffusionXL = "sdxl" + """Indicates the model is associated with the Stable Diffusion XL model architecture.""" StableDiffusionXLRefiner = "sdxl-refiner" + """Indicates the model is associated with the Stable Diffusion XL Refiner model architecture.""" Flux = "flux" + """Indicates the model is associated with the FLUX.1 model architecture, including FLUX Dev, Schnell, and Fill.""" CogView4 = "cogview4" + """Indicates the model is associated with the CogView 4 model architecture.""" Imagen3 = "imagen3" + """Indicates the model is associated with the Google Imagen 3 model architecture. This is an external API model.""" Imagen4 = "imagen4" + """Indicates the model is associated with the Google Imagen 4 model architecture. This is an external API model.""" Gemini2_5 = "gemini-2.5" + """Indicates the model is associated with the Google Gemini 2.5 Flash Image model architecture. This is an external API model.""" ChatGPT4o = "chatgpt-4o" + """Indicates the model is associated with the OpenAI ChatGPT 4o Image model architecture. This is an external API model.""" FluxKontext = "flux-kontext" + """Indicates the model is associated with the FLUX Kontext model architecture. This is an external API model; local FLUX + Kontext models use the base `Flux`.""" Veo3 = "veo3" + """Indicates the model is associated with the Google Veo 3 video model architecture. This is an external API model.""" Runway = "runway" + """Indicates the model is associated with the Runway video model architecture.
This is an external API model.""" Unknown = "unknown" + """Indicates the model's base architecture is unknown.""" class ModelType(str, Enum): @@ -92,6 +123,12 @@ class ModelVariantType(str, Enum): Depth = "depth" +class FluxVariantType(str, Enum): + Schnell = "schnell" + Dev = "dev" + DevFill = "dev_fill" + + class ModelFormat(str, Enum): """Storage format of model.""" @@ -149,4 +186,7 @@ class FluxLoRAFormat(str, Enum): AIToolkit = "flux.aitoolkit" -AnyVariant: TypeAlias = Union[ModelVariantType, ClipVariantType, None] +AnyVariant: TypeAlias = Union[ModelVariantType, ClipVariantType, FluxVariantType] +variant_type_adapter = TypeAdapter[ModelVariantType | ClipVariantType | FluxVariantType]( + ModelVariantType | ClipVariantType | FluxVariantType +) diff --git a/invokeai/backend/model_manager/util/lora_metadata_extractor.py b/invokeai/backend/model_manager/util/lora_metadata_extractor.py index 842e78a7880..12b10739354 100644 --- a/invokeai/backend/model_manager/util/lora_metadata_extractor.py +++ b/invokeai/backend/model_manager/util/lora_metadata_extractor.py @@ -8,7 +8,8 @@ from PIL import Image from invokeai.app.util.thumbnails import make_thumbnail -from invokeai.backend.model_manager.config import AnyModelConfig, ModelType +from invokeai.backend.model_manager.configs.factory import AnyModelConfig +from invokeai.backend.model_manager.taxonomy import ModelType logger = logging.getLogger(__name__) diff --git a/invokeai/backend/model_manager/util/model_util.py b/invokeai/backend/model_manager/util/model_util.py index 4fa095b5999..c153129353b 100644 --- a/invokeai/backend/model_manager/util/model_util.py +++ b/invokeai/backend/model_manager/util/model_util.py @@ -83,14 +83,14 @@ def read_checkpoint_meta(path: Union[str, Path], scan: bool = True) -> Dict[str, return checkpoint -def lora_token_vector_length(checkpoint: Dict[str, torch.Tensor]) -> Optional[int]: +def lora_token_vector_length(checkpoint: dict[str | int, torch.Tensor]) -> Optional[int]: """ Given a checkpoint in memory, return the lora token vector length :param checkpoint: The checkpoint """ - def _get_shape_1(key: str, tensor: torch.Tensor, checkpoint: Dict[str, torch.Tensor]) -> Optional[int]: + def _get_shape_1(key: str, tensor: torch.Tensor, checkpoint: dict[str | int, torch.Tensor]) -> Optional[int]: lora_token_vector_length = None if "." not in key: @@ -136,6 +136,8 @@ def _get_shape_1(key: str, tensor: torch.Tensor, checkpoint: Dict[str, torch.Ten lora_te1_length = None lora_te2_length = None for key, tensor in checkpoint.items(): + if isinstance(key, int): + continue if key.startswith("lora_unet_") and ("_attn2_to_k." in key or "_attn2_to_v." 
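# Editor's note on the widened dict[str | int, ...] key type (an assumption about
# intent: torch.load on a pickled checkpoint may yield non-string keys, which
# safetensors forbids but pickle permits). An equivalent defensive pattern for a
# consumer that only understands string keys:
#
#     str_keyed = {k: v for k, v in checkpoint.items() if isinstance(k, str)}
#
# The `if isinstance(key, int): continue` guard above applies the same idea
# in-place, without copying the checkpoint dict.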
in key): lora_token_vector_length = _get_shape_1(key, tensor, checkpoint) elif key.startswith("lora_unet_") and ( diff --git a/invokeai/backend/model_patcher.py b/invokeai/backend/model_patcher.py index a1d8bbed0a5..04f99495609 100644 --- a/invokeai/backend/model_patcher.py +++ b/invokeai/backend/model_patcher.py @@ -5,10 +5,10 @@ import pickle from contextlib import contextmanager -from typing import Any, Iterator, List, Optional, Tuple, Type, Union +from typing import Any, Generator, Iterator, List, Optional, Tuple, Type, Union import torch -from diffusers import UNet2DConditionModel +from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from invokeai.app.shared.models import FreeUConfig @@ -146,7 +146,7 @@ def apply_clip_skip( cls, text_encoder: Union[CLIPTextModel, CLIPTextModelWithProjection], clip_skip: int, - ) -> None: + ) -> Generator[None, Any, Any]: skipped_layers = [] try: for _i in range(clip_skip): @@ -164,7 +164,7 @@ def apply_freeu( cls, unet: UNet2DConditionModel, freeu_config: Optional[FreeUConfig] = None, - ) -> None: + ) -> Generator[None, Any, Any]: did_apply_freeu = False try: assert hasattr(unet, "enable_freeu") # mypy doesn't pick up this attribute? diff --git a/invokeai/backend/patches/lora_conversions/flux_aitoolkit_lora_conversion_utils.py b/invokeai/backend/patches/lora_conversions/flux_aitoolkit_lora_conversion_utils.py index 6ca06a0355f..f3c202268a7 100644 --- a/invokeai/backend/patches/lora_conversions/flux_aitoolkit_lora_conversion_utils.py +++ b/invokeai/backend/patches/lora_conversions/flux_aitoolkit_lora_conversion_utils.py @@ -12,7 +12,10 @@ from invokeai.backend.util import InvokeAILogger -def is_state_dict_likely_in_flux_aitoolkit_format(state_dict: dict[str, Any], metadata: dict[str, Any] = None) -> bool: +def is_state_dict_likely_in_flux_aitoolkit_format( + state_dict: dict[str | int, Any], + metadata: dict[str, Any] | None = None, +) -> bool: if metadata: try: software = json.loads(metadata.get("software", "{}")) @@ -20,7 +23,7 @@ def is_state_dict_likely_in_flux_aitoolkit_format(state_dict: dict[str, Any], me return False return software.get("name") == "ai-toolkit" # metadata got lost somewhere - return any("diffusion_model" == k.split(".", 1)[0] for k in state_dict.keys()) + return any("diffusion_model" == k.split(".", 1)[0] for k in state_dict.keys() if isinstance(k, str)) @dataclass diff --git a/invokeai/backend/patches/lora_conversions/flux_control_lora_utils.py b/invokeai/backend/patches/lora_conversions/flux_control_lora_utils.py index fa9cc764628..1762a4d5f4c 100644 --- a/invokeai/backend/patches/lora_conversions/flux_control_lora_utils.py +++ b/invokeai/backend/patches/lora_conversions/flux_control_lora_utils.py @@ -18,14 +18,16 @@ FLUX_CONTROL_TRANSFORMER_KEY_REGEX = r"(\w+\.)+(lora_A\.weight|lora_B\.weight|lora_B\.bias|scale)" -def is_state_dict_likely_flux_control(state_dict: Dict[str, Any]) -> bool: +def is_state_dict_likely_flux_control(state_dict: dict[str | int, Any]) -> bool: """Checks if the provided state dict is likely in the FLUX Control LoRA format. This is intended to be a high-precision detector, but it is not guaranteed to have perfect precision. (A perfect-precision detector would require checking all keys against a whitelist and verifying tensor shapes.) 
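    Illustrative usage (an editor's sketch; the filename is hypothetical and
    load_file is safetensors.torch.load_file):

        sd = load_file("flux_control_lora.safetensors")
        if is_state_dict_likely_flux_control(sd):
            ...  # route the file to the FLUX Control LoRA loader

    Non-string keys are ignored by the key check below.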
""" - all_keys_match = all(re.match(FLUX_CONTROL_TRANSFORMER_KEY_REGEX, str(k)) for k in state_dict.keys()) + all_keys_match = all( + re.match(FLUX_CONTROL_TRANSFORMER_KEY_REGEX, k) for k in state_dict.keys() if isinstance(k, str) + ) # Check the shape of the img_in weight, because this layer shape is modified by FLUX control LoRAs. lora_a_weight = state_dict.get("img_in.lora_A.weight", None) diff --git a/invokeai/backend/patches/lora_conversions/flux_diffusers_lora_conversion_utils.py b/invokeai/backend/patches/lora_conversions/flux_diffusers_lora_conversion_utils.py index 188d118cc4d..f5b4bc66847 100644 --- a/invokeai/backend/patches/lora_conversions/flux_diffusers_lora_conversion_utils.py +++ b/invokeai/backend/patches/lora_conversions/flux_diffusers_lora_conversion_utils.py @@ -9,14 +9,16 @@ from invokeai.backend.patches.model_patch_raw import ModelPatchRaw -def is_state_dict_likely_in_flux_diffusers_format(state_dict: Dict[str, torch.Tensor]) -> bool: +def is_state_dict_likely_in_flux_diffusers_format(state_dict: dict[str | int, torch.Tensor]) -> bool: """Checks if the provided state dict is likely in the Diffusers FLUX LoRA format. This is intended to be a reasonably high-precision detector, but it is not guaranteed to have perfect precision. (A perfect-precision detector would require checking all keys against a whitelist and verifying tensor shapes.) """ # First, check that all keys end in "lora_A.weight" or "lora_B.weight" (i.e. are in PEFT format). - all_keys_in_peft_format = all(k.endswith(("lora_A.weight", "lora_B.weight")) for k in state_dict.keys()) + all_keys_in_peft_format = all( + k.endswith(("lora_A.weight", "lora_B.weight")) for k in state_dict.keys() if isinstance(k, str) + ) # Check if keys use transformer prefix transformer_prefix_keys = [ diff --git a/invokeai/backend/patches/lora_conversions/flux_kohya_lora_conversion_utils.py b/invokeai/backend/patches/lora_conversions/flux_kohya_lora_conversion_utils.py index 7b5f3468963..f5a6830c4f1 100644 --- a/invokeai/backend/patches/lora_conversions/flux_kohya_lora_conversion_utils.py +++ b/invokeai/backend/patches/lora_conversions/flux_kohya_lora_conversion_utils.py @@ -44,7 +44,7 @@ FLUX_KOHYA_T5_KEY_REGEX = r"lora_te2_encoder_block_(\d+)_layer_(\d+)_(DenseReluDense|SelfAttention)_(\w+)_?(\w+)?\.?.*" -def is_state_dict_likely_in_flux_kohya_format(state_dict: Dict[str, Any]) -> bool: +def is_state_dict_likely_in_flux_kohya_format(state_dict: dict[str | int, Any]) -> bool: """Checks if the provided state dict is likely in the Kohya FLUX LoRA format. This is intended to be a high-precision detector, but it is not guaranteed to have perfect precision. 
(A @@ -56,6 +56,7 @@ def is_state_dict_likely_in_flux_kohya_format(state_dict: Dict[str, Any]) -> boo or re.match(FLUX_KOHYA_CLIP_KEY_REGEX, k) or re.match(FLUX_KOHYA_T5_KEY_REGEX, k) for k in state_dict.keys() + if isinstance(k, str) ) diff --git a/invokeai/backend/patches/lora_conversions/flux_onetrainer_lora_conversion_utils.py b/invokeai/backend/patches/lora_conversions/flux_onetrainer_lora_conversion_utils.py index 0413f0ef49f..88aeee95e49 100644 --- a/invokeai/backend/patches/lora_conversions/flux_onetrainer_lora_conversion_utils.py +++ b/invokeai/backend/patches/lora_conversions/flux_onetrainer_lora_conversion_utils.py @@ -40,7 +40,7 @@ ) -def is_state_dict_likely_in_flux_onetrainer_format(state_dict: Dict[str, Any]) -> bool: +def is_state_dict_likely_in_flux_onetrainer_format(state_dict: dict[str | int, Any]) -> bool: """Checks if the provided state dict is likely in the OneTrainer FLUX LoRA format. This is intended to be a high-precision detector, but it is not guaranteed to have perfect precision. (A @@ -53,6 +53,7 @@ def is_state_dict_likely_in_flux_onetrainer_format(state_dict: Dict[str, Any]) - or re.match(FLUX_KOHYA_CLIP_KEY_REGEX, k) or re.match(FLUX_KOHYA_T5_KEY_REGEX, k) for k in state_dict.keys() + if isinstance(k, str) ) diff --git a/invokeai/backend/patches/lora_conversions/formats.py b/invokeai/backend/patches/lora_conversions/formats.py index 94f71e05ee6..4cde7c98f67 100644 --- a/invokeai/backend/patches/lora_conversions/formats.py +++ b/invokeai/backend/patches/lora_conversions/formats.py @@ -1,3 +1,5 @@ +from typing import Any + from invokeai.backend.model_manager.taxonomy import FluxLoRAFormat from invokeai.backend.patches.lora_conversions.flux_aitoolkit_lora_conversion_utils import ( is_state_dict_likely_in_flux_aitoolkit_format, @@ -14,7 +16,10 @@ ) -def flux_format_from_state_dict(state_dict: dict, metadata: dict | None = None) -> FluxLoRAFormat | None: +def flux_format_from_state_dict( + state_dict: dict[str | int, Any], + metadata: dict[str, Any] | None = None, +) -> FluxLoRAFormat | None: if is_state_dict_likely_in_flux_kohya_format(state_dict): return FluxLoRAFormat.Kohya elif is_state_dict_likely_in_flux_onetrainer_format(state_dict): diff --git a/invokeai/backend/quantization/scripts/load_flux_model_bnb_llm_int8.py b/invokeai/backend/quantization/scripts/load_flux_model_bnb_llm_int8.py index 045ebbbf2c4..8231e313fdc 100644 --- a/invokeai/backend/quantization/scripts/load_flux_model_bnb_llm_int8.py +++ b/invokeai/backend/quantization/scripts/load_flux_model_bnb_llm_int8.py @@ -4,7 +4,8 @@ from safetensors.torch import load_file, save_file from invokeai.backend.flux.model import Flux -from invokeai.backend.flux.util import params +from invokeai.backend.flux.util import get_flux_transformers_params +from invokeai.backend.model_manager.taxonomy import FluxVariantType from invokeai.backend.quantization.bnb_llm_int8 import quantize_model_llm_int8 from invokeai.backend.quantization.scripts.load_flux_model_bnb_nf4 import log_time @@ -22,7 +23,7 @@ def main(): with log_time("Initialize FLUX transformer on meta device"): # TODO(ryand): Determine if this is a schnell model or a dev model and load the appropriate config. - p = params["flux-schnell"] + p = get_flux_transformers_params(FluxVariantType.Schnell) # Initialize the model on the "meta" device.
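# Editor's sketch of the assumed shape of get_flux_transformers_params (the real
# implementation lives in invokeai.backend.flux.util; FluxParams and its
# guidance_embed field exist there, but the exact table contents are illustrative):
#
#     _PARAMS_BY_VARIANT: dict[FluxVariantType, FluxParams] = {
#         FluxVariantType.Schnell: FluxParams(guidance_embed=False, ...),
#         FluxVariantType.Dev: FluxParams(guidance_embed=True, ...),
#     }
#
#     def get_flux_transformers_params(variant: FluxVariantType) -> FluxParams:
#         return _PARAMS_BY_VARIANT[variant]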
with accelerate.init_empty_weights(): diff --git a/invokeai/backend/quantization/scripts/load_flux_model_bnb_nf4.py b/invokeai/backend/quantization/scripts/load_flux_model_bnb_nf4.py index c8802b9e49e..6a4ee3abf93 100644 --- a/invokeai/backend/quantization/scripts/load_flux_model_bnb_nf4.py +++ b/invokeai/backend/quantization/scripts/load_flux_model_bnb_nf4.py @@ -7,7 +7,8 @@ from safetensors.torch import load_file, save_file from invokeai.backend.flux.model import Flux -from invokeai.backend.flux.util import params +from invokeai.backend.flux.util import get_flux_transformers_params +from invokeai.backend.model_manager.taxonomy import FluxVariantType from invokeai.backend.quantization.bnb_nf4 import quantize_model_nf4 @@ -35,7 +36,7 @@ def main(): # inference_dtype = torch.bfloat16 with log_time("Initialize FLUX transformer on meta device"): # TODO(ryand): Determine if this is a schnell model or a dev model and load the appropriate config. - p = params["flux-schnell"] + p = get_flux_transformers_params(FluxVariantType.Schnell) # Initialize the model on the "meta" device. with accelerate.init_empty_weights(): diff --git a/invokeai/backend/util/hotfixes.py b/invokeai/backend/util/hotfixes.py index 95f2c904ad8..7e258b87795 100644 --- a/invokeai/backend/util/hotfixes.py +++ b/invokeai/backend/util/hotfixes.py @@ -23,6 +23,7 @@ from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel from torch import nn +from invokeai.backend.model_manager.taxonomy import BaseModelType, SchedulerPredictionType from invokeai.backend.util.logging import InvokeAILogger # TODO: create PR to diffusers @@ -407,7 +408,8 @@ def from_unet( use_linear_projection=unet.config.use_linear_projection, class_embed_type=unet.config.class_embed_type, num_class_embeds=unet.config.num_class_embeds, - upcast_attention=unet.config.upcast_attention, + upcast_attention=unet.config.base == BaseModelType.StableDiffusion2 + and unet.config.prediction_type == SchedulerPredictionType.VPrediction, resnet_time_scale_shift=unet.config.resnet_time_scale_shift, projection_class_embeddings_input_dim=unet.config.projection_class_embeddings_input_dim, controlnet_conditioning_channel_order=controlnet_conditioning_channel_order, diff --git a/invokeai/backend/util/test_utils.py b/invokeai/backend/util/test_utils.py index add394e71be..e4208dc848f 100644 --- a/invokeai/backend/util/test_utils.py +++ b/invokeai/backend/util/test_utils.py @@ -7,7 +7,8 @@ from invokeai.app.services.model_manager import ModelManagerServiceBase from invokeai.app.services.model_records import UnknownModelException -from invokeai.backend.model_manager import BaseModelType, LoadedModel, ModelType, SubModelType +from invokeai.backend.model_manager.load.load_base import LoadedModel +from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType, SubModelType @pytest.fixture(scope="session") diff --git a/invokeai/frontend/web/src/features/controlLayers/components/ParamDenoisingStrength.tsx b/invokeai/frontend/web/src/features/controlLayers/components/ParamDenoisingStrength.tsx index bf4464bd5bd..49a289b875c 100644 --- a/invokeai/frontend/web/src/features/controlLayers/components/ParamDenoisingStrength.tsx +++ b/invokeai/frontend/web/src/features/controlLayers/components/ParamDenoisingStrength.tsx @@ -17,6 +17,7 @@ import { selectImg2imgStrengthConfig } from 'features/system/store/configSlice'; import { memo, useCallback, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; import { useSelectedModelConfig } from
'services/api/hooks/useSelectedModelConfig'; +import { isFluxFillMainModelModelConfig } from 'services/api/types'; const selectHasRasterLayersWithContent = createSelector( selectActiveRasterLayerEntities, @@ -46,11 +47,7 @@ export const ParamDenoisingStrength = memo(() => { // Denoising strength does nothing if there are no raster layers w/ content return true; } - if ( - selectedModelConfig?.type === 'main' && - selectedModelConfig?.base === 'flux' && - selectedModelConfig.variant === 'inpaint' - ) { + if (selectedModelConfig && isFluxFillMainModelModelConfig(selectedModelConfig)) { // Denoising strength is ignored by FLUX Fill, which is indicated by the variant being 'dev_fill' return true; } diff --git a/invokeai/frontend/web/src/features/controlLayers/store/validators.ts b/invokeai/frontend/web/src/features/controlLayers/store/validators.ts index 03ef5404a6d..197a3d6e3e3 100644 --- a/invokeai/frontend/web/src/features/controlLayers/store/validators.ts +++ b/invokeai/frontend/web/src/features/controlLayers/store/validators.ts @@ -154,7 +154,7 @@ export const getControlLayerWarnings = ( warnings.push(WARNINGS.CONTROL_ADAPTER_INCOMPATIBLE_BASE_MODEL); } else if ( model.base === 'flux' && - model.variant === 'inpaint' && + model.variant === 'dev_fill' && entity.controlAdapter.model.type === 'control_lora' ) { // FLUX dev_fill variants are FLUX Fill models - not compatible w/ Control LoRA diff --git a/invokeai/frontend/web/src/features/modelManagerV2/models.ts b/invokeai/frontend/web/src/features/modelManagerV2/models.ts index ec4ddf1a1d5..0b4096e010b 100644 --- a/invokeai/frontend/web/src/features/modelManagerV2/models.ts +++ b/invokeai/frontend/web/src/features/modelManagerV2/models.ts @@ -1,4 +1,4 @@ -import type { BaseModelType, ModelFormat, ModelType, ModelVariantType } from 'features/nodes/types/common'; +import type { AnyModelVariant, BaseModelType, ModelFormat, ModelType } from 'features/nodes/types/common'; import type { AnyModelConfig } from 'services/api/types'; import { isCLIPEmbedModelConfig, @@ -219,10 +219,15 @@ export const MODEL_BASE_TO_SHORT_NAME: Record<BaseModelType, string> = { unknown: 'Unknown', }; -export const MODEL_VARIANT_TO_LONG_NAME: Record<ModelVariantType, string> = { +export const MODEL_VARIANT_TO_LONG_NAME: Record<AnyModelVariant, string> = { normal: 'Normal', inpaint: 'Inpaint', depth: 'Depth', + dev: 'FLUX Dev', + dev_fill: 'FLUX Dev - Fill', + schnell: 'FLUX Schnell', + large: 'CLIP L', + gigantic: 'CLIP G', }; export const MODEL_FORMAT_TO_LONG_NAME: Record<ModelFormat, string> = { diff --git a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelFormatBadge.tsx b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelFormatBadge.tsx index 87923f9f00e..e139639f1f0 100644 --- a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelFormatBadge.tsx +++ b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelFormatBadge.tsx @@ -1,12 +1,12 @@ import { Badge } from '@invoke-ai/ui-library'; +import type { ModelFormat } from 'features/nodes/types/common'; import { memo } from 'react'; -import type { AnyModelConfig } from 'services/api/types'; type Props = { - format: AnyModelConfig['format']; + format: ModelFormat; }; -const FORMAT_NAME_MAP: Record<AnyModelConfig['format'], string> = { +const FORMAT_NAME_MAP: Record<ModelFormat, string> = { diffusers: 'diffusers', lycoris: 'lycoris', checkpoint: 'checkpoint', @@ -20,9 +20,11 @@ const FORMAT_NAME_MAP: Record<AnyModelConfig['format'], string> = { api: 'api', omi: 'omi', unknown: 'unknown', + olive: 'olive', + onnx: 'onnx', }; -const FORMAT_COLOR_MAP: Record<AnyModelConfig['format'], string> = { +const
FORMAT_COLOR_MAP: Record<ModelFormat, string> = { diffusers: 'base', omi: 'base', lycoris: 'base', @@ -36,6 +38,8 @@ const FORMAT_COLOR_MAP: Record<AnyModelConfig['format'], string> = { gguf_quantized: 'base', api: 'base', unknown: 'red', + olive: 'base', + onnx: 'base', }; const ModelFormatBadge = ({ format }: Props) => { diff --git a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/ModelView.tsx b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/ModelView.tsx index 75b3ba4bc40..538ebf597e0 100644 --- a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/ModelView.tsx +++ b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelPanel/ModelView.tsx @@ -56,18 +56,17 @@ export const ModelView = memo(({ modelConfig }: Props) => { - {modelConfig.type === 'main' && ( + {modelConfig.type === 'main' && 'variant' in modelConfig && ( )} {modelConfig.type === 'main' && modelConfig.format === 'diffusers' && modelConfig.repo_variant && ( )} {modelConfig.type === 'main' && modelConfig.format === 'checkpoint' && ( - <> - - - - + + )} + {modelConfig.type === 'main' && modelConfig.format === 'checkpoint' && 'prediction_type' in modelConfig && ( + )} {modelConfig.type === 'ip_adapter' && modelConfig.format === 'invokeai' && ( diff --git a/invokeai/frontend/web/src/features/nodes/types/common.test-d.ts b/invokeai/frontend/web/src/features/nodes/types/common.test-d.ts index e3fa3772bb8..c223747b931 100644 --- a/invokeai/frontend/web/src/features/nodes/types/common.test-d.ts +++ b/invokeai/frontend/web/src/features/nodes/types/common.test-d.ts @@ -12,6 +12,7 @@ import type { T2IAdapterField, zBaseModelType, zClipVariantType, + zFluxVariantType, zModelFormat, zModelVariantType, zSubModelType, @@ -45,6 +46,7 @@ describe('Common types', () => { test('ModelIdentifier', () => assert, S['SubModelType']>>()); test('ClipVariantType', () => assert<Equals<z.infer<typeof zClipVariantType>, S['ClipVariantType']>>()); test('ModelVariantType', () => assert<Equals<z.infer<typeof zModelVariantType>, S['ModelVariantType']>>()); + test('FluxVariantType', () => assert<Equals<z.infer<typeof zFluxVariantType>, S['FluxVariantType']>>()); test('ModelFormat', () => assert<Equals<z.infer<typeof zModelFormat>, S['ModelFormat']>>()); // Misc types diff --git a/invokeai/frontend/web/src/features/nodes/types/common.ts b/invokeai/frontend/web/src/features/nodes/types/common.ts index 4b97c2145d8..c51defd79c5 100644 --- a/invokeai/frontend/web/src/features/nodes/types/common.ts +++ b/invokeai/frontend/web/src/features/nodes/types/common.ts @@ -148,7 +148,9 @@ export const zSubModelType = z.enum([ export const zClipVariantType = z.enum(['large', 'gigantic']); export const zModelVariantType = z.enum(['normal', 'inpaint', 'depth']); -export type ModelVariantType = z.infer<typeof zModelVariantType>; +export const zFluxVariantType = z.enum(['dev', 'dev_fill', 'schnell']); +export const zAnyModelVariant = z.union([zModelVariantType, zClipVariantType, zFluxVariantType]); +export type AnyModelVariant = z.infer<typeof zAnyModelVariant>; export const zModelFormat = z.enum([ 'omi', 'diffusers', diff --git a/invokeai/frontend/web/src/features/nodes/types/field.ts b/invokeai/frontend/web/src/features/nodes/types/field.ts index 320a4ac521c..5b8634daa2b 100644 --- a/invokeai/frontend/web/src/features/nodes/types/field.ts +++ b/invokeai/frontend/web/src/features/nodes/types/field.ts @@ -10,15 +10,14 @@ import { z } from 'zod'; import type { ImageField } from './common'; import { + zAnyModelVariant, zBaseModelType, zBoardField, - zClipVariantType, zColorField, zImageField, zModelFormat, zModelIdentifierField, zModelType, - zModelVariantType, zSchedulerField, } from './common'; @@ -73,7 +72,7 @@ const zFieldInputTemplateBase =
zFieldTemplateBase.extend({ ui_choice_labels: z.record(z.string(), z.string()).nullish(), ui_model_base: z.array(zBaseModelType).nullish(), ui_model_type: z.array(zModelType).nullish(), - ui_model_variant: z.array(zModelVariantType.or(zClipVariantType)).nullish(), + ui_model_variant: z.array(zAnyModelVariant).nullish(), ui_model_format: z.array(zModelFormat).nullish(), }); const zFieldOutputTemplateBase = zFieldTemplateBase.extend({ diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/Graph.test.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/Graph.test.ts index 24ef7123576..42d66f0fc81 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/Graph.test.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/Graph.test.ts @@ -660,6 +660,7 @@ describe('Graph', () => { cover_image: null, type: 'main', trigger_phrases: null, + prediction_type: 'epsilon', default_settings: { vae: null, vae_precision: null, @@ -673,6 +674,7 @@ describe('Graph', () => { variant: 'inpaint', format: 'diffusers', repo_variant: 'fp16', + usage_info: null, }); expect(field).toEqual({ key: 'b00ee8df-523d-40d2-9578-597283b07cb2', diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addControlAdapters.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addControlAdapters.ts index 5fcd13ba4fd..0f7163c44c2 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addControlAdapters.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addControlAdapters.ts @@ -5,7 +5,7 @@ import type { CanvasControlLayerState, Rect } from 'features/controlLayers/store import { getControlLayerWarnings } from 'features/controlLayers/store/validators'; import type { Graph } from 'features/nodes/util/graph/generation/Graph'; import { serializeError } from 'serialize-error'; -import type { ImageDTO, Invocation, MainModelConfig } from 'services/api/types'; +import type { FLUXModelConfig, ImageDTO, Invocation, MainModelConfig } from 'services/api/types'; import { assert } from 'tsafe'; const log = logger('system'); @@ -113,7 +113,7 @@ type AddControlLoRAArg = { entities: CanvasControlLayerState[]; g: Graph; rect: Rect; - model: MainModelConfig; + model: FLUXModelConfig; denoise: Invocation<'flux_denoise'>; }; @@ -129,7 +129,7 @@ export const addControlLoRA = async ({ manager, entities, g, rect, model, denois return; } - assert(model.variant !== 'inpaint', 'FLUX Control LoRA is not compatible with FLUX Fill.'); + assert(model.variant !== 'dev_fill', 'FLUX Control LoRA is not compatible with FLUX Fill.'); assert(validControlLayers.length <= 1, 'Cannot add more than one FLUX control LoRA.'); const getImageDTOResult = await withResultAsync(() => { diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildFLUXGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildFLUXGraph.ts index b47244e5fc2..a827fff1069 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildFLUXGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildFLUXGraph.ts @@ -49,7 +49,7 @@ export const buildFLUXGraph = async (arg: GraphBuilderArg): Promise { const isFluxDevSelected = useMemo( () => - selectedModelConfig && - isCheckpointMainModelConfig(selectedModelConfig) && - selectedModelConfig.config_path === 'flux-dev', + selectedModelConfig && isCheckpointMainModelConfig(selectedModelConfig) && selectedModelConfig.variant === 'dev', 
[selectedModelConfig] ); diff --git a/invokeai/frontend/web/src/features/ui/layouts/InitialStateMainModelPicker.tsx b/invokeai/frontend/web/src/features/ui/layouts/InitialStateMainModelPicker.tsx index 9807ae9e690..b0aca495183 100644 --- a/invokeai/frontend/web/src/features/ui/layouts/InitialStateMainModelPicker.tsx +++ b/invokeai/frontend/web/src/features/ui/layouts/InitialStateMainModelPicker.tsx @@ -24,9 +24,7 @@ export const InitialStateMainModelPicker = memo(() => { const isFluxDevSelected = useMemo( () => - selectedModelConfig && - isCheckpointMainModelConfig(selectedModelConfig) && - selectedModelConfig.config_path === 'flux-dev', + selectedModelConfig && isCheckpointMainModelConfig(selectedModelConfig) && selectedModelConfig.variant === 'dev', [selectedModelConfig] ); diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts index 4d9f9ab2ec3..dbb194402af 100644 --- a/invokeai/frontend/web/src/services/api/schema.ts +++ b/invokeai/frontend/web/src/services/api/schema.ts @@ -2200,97 +2200,7 @@ export type components = { */ type: "alpha_mask_to_tensor"; }; - /** - * ApiModelConfig - * @description Model config for API-based models. - */ - ApiModelConfig: { - /** - * Key - * @description A unique key for this model. - */ - key: string; - /** - * Hash - * @description The hash of the model file(s). - */ - hash: string; - /** - * Path - * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. - */ - path: string; - /** - * File Size - * @description The size of the model in bytes. - */ - file_size: number; - /** - * Name - * @description Name of the model. - */ - name: string; - /** - * Type - * @default main - * @constant - */ - type: "main"; - /** - * Format - * @default api - * @constant - */ - format: "api"; - /** @description The base model. */ - base: components["schemas"]["BaseModelType"]; - /** - * Source - * @description The original source of the model (path, URL or repo_id). - */ - source: string; - /** @description The type of source */ - source_type: components["schemas"]["ModelSourceType"]; - /** - * Description - * @description Model description - */ - description?: string | null; - /** - * Source Api Response - * @description The original API response from the source, as stringified JSON. 
- */ - source_api_response?: string | null; - /** - * Cover Image - * @description Url for image to preview model - */ - cover_image?: string | null; - /** - * Submodels - * @description Loadable submodels in this model - */ - submodels?: { - [key: string]: components["schemas"]["SubmodelDefinition"]; - } | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info?: string | null; - /** - * Trigger Phrases - * @description Set of trigger phrases for this model - */ - trigger_phrases?: string[] | null; - /** @description Default settings for this model */ - default_settings?: components["schemas"]["MainModelDefaultSettings"] | null; - /** - * Variant - * @default normal - */ - variant?: components["schemas"]["ModelVariantType"] | components["schemas"]["ClipVariantType"] | null; - }; + AnyModelConfig: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Main_ExternalAPI_ChatGPT4o_Config"] | components["schemas"]["Main_ExternalAPI_Gemini2_5_Config"] | components["schemas"]["Main_ExternalAPI_Imagen3_Config"] | components["schemas"]["Main_ExternalAPI_Imagen4_Config"] | components["schemas"]["Main_ExternalAPI_FluxKontext_Config"] | components["schemas"]["Video_ExternalAPI_Veo3_Config"] | components["schemas"]["Video_ExternalAPI_Runway_Config"] | components["schemas"]["Unknown_Config"]; /** * AppConfig * @description App Config Response @@ -2464,7 +2374,13 @@ export type components = { }; /** * BaseModelType - * @description Base model type. + * @description An enumeration of base model architectures. For example, Stable Diffusion 1.x, Stable Diffusion 2.x, FLUX, etc. + * + * Every model config must have a base architecture type. + * + * Not all models are associated with a base architecture. For example, CLIP models are their own thing, not related + * to any particular model architecture. To simplify internal APIs and make it easier to work with models, we use a + * fallback/null value `BaseModelType.Any` for these models, instead of making the model base optional. * @enum {string} */ BaseModelType: "any" | "sd-1" | "sd-2" | "sd-3" | "sdxl" | "sdxl-refiner" | "flux" | "cogview4" | "imagen3" | "imagen4" | "gemini-2.5" | "chatgpt-4o" | "flux-kontext" | "veo3" | "runway" | "unknown"; @@ -3457,28 +3373,8 @@ export type components = { */ bulk_download_item_name: string; }; - /** CLIPField */ - CLIPField: { - /** @description Info to load tokenizer submodel */ - tokenizer: components["schemas"]["ModelIdentifierField"]; - /** @description Info to load text_encoder submodel */ - text_encoder: components["schemas"]["ModelIdentifierField"]; - /** - * Skipped Layers - * @description Number of skipped layers in text_encoder - */ - skipped_layers: number; - /** - * Loras - * @description LoRAs to apply on model loading - */ - loras: components["schemas"]["LoRAField"][]; - }; - /** - * CLIPGEmbedDiffusersConfig - * @description Model config for CLIP-G Embeddings. - */ - CLIPGEmbedDiffusersConfig: { + /** CLIPEmbed_Diffusers_G_Config */ + CLIPEmbed_Diffusers_G_Config: { /** * Key * @description A unique key for this model. @@ -3505,19 +3401,10 @@ export type components = { */ name: string; /** - * Type - * @default clip_embed - * @constant - */ - type: "clip_embed"; - /** - * Format - * @default diffusers - * @constant + * Description + * @description Model description */ - format: "diffusers"; - /** @description The base model. */ - base: components["schemas"]["BaseModelType"]; + description: string | null; /** * Source * @description The original source of the model (path, URL or repo_id). 
@@ -3525,47 +3412,50 @@ export type components = { source: string; /** @description The type of source */ source_type: components["schemas"]["ModelSourceType"]; - /** - * Description - * @description Model description - */ - description?: string | null; /** * Source Api Response * @description The original API response from the source, as stringified JSON. */ - source_api_response?: string | null; + source_api_response: string | null; /** * Cover Image * @description Url for image to preview model */ - cover_image?: string | null; - /** - * Submodels - * @description Loadable submodels in this model - */ - submodels?: { - [key: string]: components["schemas"]["SubmodelDefinition"]; - } | null; + cover_image: string | null; /** * Usage Info * @description Usage information for this model */ - usage_info?: string | null; + usage_info: string | null; + /** + * Format + * @default diffusers + * @constant + */ + format: "diffusers"; /** @default */ - repo_variant?: components["schemas"]["ModelRepoVariant"] | null; + repo_variant: components["schemas"]["ModelRepoVariant"]; + /** + * Base + * @default any + * @constant + */ + base: "any"; + /** + * Type + * @default clip_embed + * @constant + */ + type: "clip_embed"; /** * Variant * @default gigantic * @constant */ - variant?: "gigantic"; + variant: "gigantic"; }; - /** - * CLIPLEmbedDiffusersConfig - * @description Model config for CLIP-L Embeddings. - */ - CLIPLEmbedDiffusersConfig: { + /** CLIPEmbed_Diffusers_L_Config */ + CLIPEmbed_Diffusers_L_Config: { /** * Key * @description A unique key for this model. @@ -3592,19 +3482,10 @@ export type components = { */ name: string; /** - * Type - * @default clip_embed - * @constant - */ - type: "clip_embed"; - /** - * Format - * @default diffusers - * @constant + * Description + * @description Model description */ - format: "diffusers"; - /** @description The base model. */ - base: components["schemas"]["BaseModelType"]; + description: string | null; /** * Source * @description The original source of the model (path, URL or repo_id). @@ -3612,41 +3493,64 @@ export type components = { source: string; /** @description The type of source */ source_type: components["schemas"]["ModelSourceType"]; - /** - * Description - * @description Model description - */ - description?: string | null; /** * Source Api Response * @description The original API response from the source, as stringified JSON. 
*/ - source_api_response?: string | null; + source_api_response: string | null; /** * Cover Image * @description Url for image to preview model */ - cover_image?: string | null; - /** - * Submodels - * @description Loadable submodels in this model - */ - submodels?: { - [key: string]: components["schemas"]["SubmodelDefinition"]; - } | null; + cover_image: string | null; /** * Usage Info * @description Usage information for this model */ - usage_info?: string | null; + usage_info: string | null; + /** + * Format + * @default diffusers + * @constant + */ + format: "diffusers"; /** @default */ - repo_variant?: components["schemas"]["ModelRepoVariant"] | null; + repo_variant: components["schemas"]["ModelRepoVariant"]; + /** + * Base + * @default any + * @constant + */ + base: "any"; + /** + * Type + * @default clip_embed + * @constant + */ + type: "clip_embed"; /** * Variant * @default large * @constant */ - variant?: "large"; + variant: "large"; + }; + /** CLIPField */ + CLIPField: { + /** @description Info to load tokenizer submodel */ + tokenizer: components["schemas"]["ModelIdentifierField"]; + /** @description Info to load text_encoder submodel */ + text_encoder: components["schemas"]["ModelIdentifierField"]; + /** + * Skipped Layers + * @description Number of skipped layers in text_encoder + */ + skipped_layers: number; + /** + * Loras + * @description LoRAs to apply on model loading + */ + loras: components["schemas"]["LoRAField"][]; }; /** * CLIPOutput @@ -3725,10 +3629,10 @@ export type components = { type: "clip_skip_output"; }; /** - * CLIPVisionDiffusersConfig + * CLIPVision_Diffusers_Config * @description Model config for CLIPVision. */ - CLIPVisionDiffusersConfig: { + CLIPVision_Diffusers_Config: { /** * Key * @description A unique key for this model. @@ -3755,19 +3659,10 @@ export type components = { */ name: string; /** - * Type - * @default clip_vision - * @constant - */ - type: "clip_vision"; - /** - * Format - * @default diffusers - * @constant + * Description + * @description Model description */ - format: "diffusers"; - /** @description The base model. */ - base: components["schemas"]["BaseModelType"]; + description: string | null; /** * Source * @description The original source of the model (path, URL or repo_id). @@ -3775,35 +3670,41 @@ export type components = { source: string; /** @description The type of source */ source_type: components["schemas"]["ModelSourceType"]; - /** - * Description - * @description Model description - */ - description?: string | null; /** * Source Api Response * @description The original API response from the source, as stringified JSON. 
*/ - source_api_response?: string | null; + source_api_response: string | null; /** * Cover Image * @description Url for image to preview model */ - cover_image?: string | null; - /** - * Submodels - * @description Loadable submodels in this model - */ - submodels?: { - [key: string]: components["schemas"]["SubmodelDefinition"]; - } | null; + cover_image: string | null; /** * Usage Info * @description Usage information for this model */ - usage_info?: string | null; + usage_info: string | null; + /** + * Format + * @default diffusers + * @constant + */ + format: "diffusers"; /** @default */ - repo_variant?: components["schemas"]["ModelRepoVariant"] | null; + repo_variant: components["schemas"]["ModelRepoVariant"]; + /** + * Base + * @default any + * @constant + */ + base: "any"; + /** + * Type + * @default clip_vision + * @constant + */ + type: "clip_vision"; }; /** * CV2 Infill @@ -5220,13 +5121,25 @@ export type components = { */ resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple"; }; - /** - * ControlLoRADiffusersConfig - * @description Model config for Control LoRA models. - */ - ControlLoRADiffusersConfig: { + /** ControlLoRAField */ + ControlLoRAField: { + /** @description Info to load lora model */ + lora: components["schemas"]["ModelIdentifierField"]; /** - * Key + * Weight + * @description Weight to apply to lora model + */ + weight: number; + /** @description Image to use in structural conditioning */ + img: components["schemas"]["ImageField"]; + }; + /** + * ControlLoRA_LyCORIS_FLUX_Config + * @description Model config for Control LoRA models. + */ + ControlLoRA_LyCORIS_FLUX_Config: { + /** + * Key * @description A unique key for this model. */ key: string; @@ -5251,19 +5164,10 @@ export type components = { */ name: string; /** - * Type - * @default control_lora - * @constant - */ - type: "control_lora"; - /** - * Format - * @default diffusers - * @constant + * Description + * @description Model description */ - format: "diffusers"; - /** @description The base model. */ - base: components["schemas"]["BaseModelType"]; + description: string | null; /** * Source * @description The original source of the model (path, URL or repo_id). @@ -5271,144 +5175,160 @@ export type components = { source: string; /** @description The type of source */ source_type: components["schemas"]["ModelSourceType"]; - /** - * Description - * @description Model description - */ - description?: string | null; /** * Source Api Response * @description The original API response from the source, as stringified JSON. 
*/ - source_api_response?: string | null; + source_api_response: string | null; /** * Cover Image * @description Url for image to preview model */ - cover_image?: string | null; - /** - * Submodels - * @description Loadable submodels in this model - */ - submodels?: { - [key: string]: components["schemas"]["SubmodelDefinition"]; - } | null; + cover_image: string | null; /** * Usage Info * @description Usage information for this model */ - usage_info?: string | null; - /** @description Default settings for this model */ - default_settings?: components["schemas"]["ControlAdapterDefaultSettings"] | null; + usage_info: string | null; + default_settings: components["schemas"]["ControlAdapterDefaultSettings"] | null; /** - * Trigger Phrases - * @description Set of trigger phrases for this model + * Base + * @default flux + * @constant */ - trigger_phrases?: string[] | null; - }; - /** ControlLoRAField */ - ControlLoRAField: { - /** @description Info to load lora model */ - lora: components["schemas"]["ModelIdentifierField"]; + base: "flux"; /** - * Weight - * @description Weight to apply to lora model + * Type + * @default control_lora + * @constant */ - weight: number; - /** @description Image to use in structural conditioning */ - img: components["schemas"]["ImageField"]; + type: "control_lora"; + /** + * Format + * @default lycoris + * @constant + */ + format: "lycoris"; + /** Trigger Phrases */ + trigger_phrases: string[] | null; }; /** - * ControlLoRALyCORISConfig - * @description Model config for Control LoRA models. + * ControlNet - SD1.5, SD2, SDXL + * @description Collects ControlNet info to pass to other nodes */ - ControlLoRALyCORISConfig: { + ControlNetInvocation: { /** - * Key - * @description A unique key for this model. + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - key: string; + id: string; /** - * Hash - * @description The hash of the model file(s). + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - hash: string; + is_intermediate?: boolean; /** - * Path - * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. + * Use Cache + * @description Whether or not to use the cache + * @default true */ - path: string; + use_cache?: boolean; /** - * File Size - * @description The size of the model in bytes. + * @description The control image + * @default null */ - file_size: number; + image?: components["schemas"]["ImageField"] | null; /** - * Name - * @description Name of the model. + * @description ControlNet model to load + * @default null */ - name: string; + control_model?: components["schemas"]["ModelIdentifierField"] | null; /** - * Type - * @default control_lora - * @constant + * Control Weight + * @description The weight given to the ControlNet + * @default 1 */ - type: "control_lora"; + control_weight?: number | number[]; /** - * Format - * @default lycoris - * @constant + * Begin Step Percent + * @description When the ControlNet is first applied (% of total steps) + * @default 0 */ - format: "lycoris"; - /** @description The base model. */ - base: components["schemas"]["BaseModelType"]; + begin_step_percent?: number; /** - * Source - * @description The original source of the model (path, URL or repo_id). 
+ * End Step Percent + * @description When the ControlNet is last applied (% of total steps) + * @default 1 */ - source: string; - /** @description The type of source */ - source_type: components["schemas"]["ModelSourceType"]; + end_step_percent?: number; /** - * Description - * @description Model description + * Control Mode + * @description The control mode used + * @default balanced + * @enum {string} */ - description?: string | null; + control_mode?: "balanced" | "more_prompt" | "more_control" | "unbalanced"; /** - * Source Api Response - * @description The original API response from the source, as stringified JSON. + * Resize Mode + * @description The resize mode used + * @default just_resize + * @enum {string} */ - source_api_response?: string | null; + resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple"; /** - * Cover Image - * @description Url for image to preview model + * type + * @default controlnet + * @constant */ - cover_image?: string | null; + type: "controlnet"; + }; + /** ControlNetMetadataField */ + ControlNetMetadataField: { + /** @description The control image */ + image: components["schemas"]["ImageField"]; /** - * Submodels - * @description Loadable submodels in this model + * @description The control image, after processing. + * @default null */ - submodels?: { - [key: string]: components["schemas"]["SubmodelDefinition"]; - } | null; + processed_image?: components["schemas"]["ImageField"] | null; + /** @description The ControlNet model to use */ + control_model: components["schemas"]["ModelIdentifierField"]; /** - * Usage Info - * @description Usage information for this model + * Control Weight + * @description The weight given to the ControlNet + * @default 1 */ - usage_info?: string | null; - /** @description Default settings for this model */ - default_settings?: components["schemas"]["ControlAdapterDefaultSettings"] | null; + control_weight?: number | number[]; /** - * Trigger Phrases - * @description Set of trigger phrases for this model + * Begin Step Percent + * @description When the ControlNet is first applied (% of total steps) + * @default 0 */ - trigger_phrases?: string[] | null; + begin_step_percent?: number; + /** + * End Step Percent + * @description When the ControlNet is last applied (% of total steps) + * @default 1 + */ + end_step_percent?: number; + /** + * Control Mode + * @description The control mode to use + * @default balanced + * @enum {string} + */ + control_mode?: "balanced" | "more_prompt" | "more_control" | "unbalanced"; + /** + * Resize Mode + * @description The resize mode to use + * @default just_resize + * @enum {string} + */ + resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple"; }; - /** - * ControlNetCheckpointConfig - * @description Model config for ControlNet models (diffusers version). - */ - ControlNetCheckpointConfig: { + /** ControlNet_Checkpoint_FLUX_Config */ + ControlNet_Checkpoint_FLUX_Config: { /** * Key * @description A unique key for this model. @@ -5435,20 +5355,10 @@ export type components = { */ name: string; /** - * Type - * @default controlnet - * @constant - */ - type: "controlnet"; - /** - * Format - * @description Format of the provided checkpoint model - * @default checkpoint - * @enum {string} + * Description + * @description Model description */ - format: "checkpoint" | "bnb_quantized_nf4b" | "gguf_quantized"; - /** @description The base model. 
*/ - base: components["schemas"]["BaseModelType"]; + description: string | null; /** * Source * @description The original source of the model (path, URL or repo_id). @@ -5456,51 +5366,48 @@ export type components = { source: string; /** @description The type of source */ source_type: components["schemas"]["ModelSourceType"]; - /** - * Description - * @description Model description - */ - description?: string | null; /** * Source Api Response * @description The original API response from the source, as stringified JSON. */ - source_api_response?: string | null; + source_api_response: string | null; /** * Cover Image * @description Url for image to preview model */ - cover_image?: string | null; - /** - * Submodels - * @description Loadable submodels in this model - */ - submodels?: { - [key: string]: components["schemas"]["SubmodelDefinition"]; - } | null; + cover_image: string | null; /** * Usage Info * @description Usage information for this model */ - usage_info?: string | null; - /** @description Default settings for this model */ - default_settings?: components["schemas"]["ControlAdapterDefaultSettings"] | null; + usage_info: string | null; /** * Config Path - * @description path to the checkpoint model config file + * @description Path to the config for this model, if any. + */ + config_path: string | null; + /** + * Type + * @default controlnet + * @constant + */ + type: "controlnet"; + /** + * Format + * @default checkpoint + * @constant */ - config_path: string; + format: "checkpoint"; + default_settings: components["schemas"]["ControlAdapterDefaultSettings"] | null; /** - * Converted At - * @description When this model was last converted to diffusers + * Base + * @default flux + * @constant */ - converted_at?: number | null; + base: "flux"; }; - /** - * ControlNetDiffusersConfig - * @description Model config for ControlNet models (diffusers version). - */ - ControlNetDiffusersConfig: { + /** ControlNet_Checkpoint_SD1_Config */ + ControlNet_Checkpoint_SD1_Config: { /** * Key * @description A unique key for this model. @@ -5527,19 +5434,10 @@ export type components = { */ name: string; /** - * Type - * @default controlnet - * @constant - */ - type: "controlnet"; - /** - * Format - * @default diffusers - * @constant + * Description + * @description Model description */ - format: "diffusers"; - /** @description The base model. */ - base: components["schemas"]["BaseModelType"]; + description: string | null; /** * Source * @description The original source of the model (path, URL or repo_id). @@ -5547,592 +5445,527 @@ export type components = { source: string; /** @description The type of source */ source_type: components["schemas"]["ModelSourceType"]; - /** - * Description - * @description Model description - */ - description?: string | null; /** * Source Api Response * @description The original API response from the source, as stringified JSON. 
*/ - source_api_response?: string | null; + source_api_response: string | null; /** * Cover Image * @description Url for image to preview model */ - cover_image?: string | null; - /** - * Submodels - * @description Loadable submodels in this model - */ - submodels?: { - [key: string]: components["schemas"]["SubmodelDefinition"]; - } | null; + cover_image: string | null; /** * Usage Info * @description Usage information for this model */ - usage_info?: string | null; - /** @description Default settings for this model */ - default_settings?: components["schemas"]["ControlAdapterDefaultSettings"] | null; - /** @default */ - repo_variant?: components["schemas"]["ModelRepoVariant"] | null; - }; - /** - * ControlNet - SD1.5, SD2, SDXL - * @description Collects ControlNet info to pass to other nodes - */ - ControlNetInvocation: { + usage_info: string | null; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Config Path + * @description Path to the config for this model, if any. */ - id: string; + config_path: string | null; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Type + * @default controlnet + * @constant */ - is_intermediate?: boolean; + type: "controlnet"; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Format + * @default checkpoint + * @constant */ - use_cache?: boolean; + format: "checkpoint"; + default_settings: components["schemas"]["ControlAdapterDefaultSettings"] | null; /** - * @description The control image - * @default null + * Base + * @default sd-1 + * @constant */ - image?: components["schemas"]["ImageField"] | null; + base: "sd-1"; + }; + /** ControlNet_Checkpoint_SD2_Config */ + ControlNet_Checkpoint_SD2_Config: { /** - * @description ControlNet model to load - * @default null + * Key + * @description A unique key for this model. */ - control_model?: components["schemas"]["ModelIdentifierField"] | null; + key: string; /** - * Control Weight - * @description The weight given to the ControlNet - * @default 1 + * Hash + * @description The hash of the model file(s). */ - control_weight?: number | number[]; + hash: string; /** - * Begin Step Percent - * @description When the ControlNet is first applied (% of total steps) - * @default 0 + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. */ - begin_step_percent?: number; + path: string; /** - * End Step Percent - * @description When the ControlNet is last applied (% of total steps) - * @default 1 + * File Size + * @description The size of the model in bytes. */ - end_step_percent?: number; + file_size: number; /** - * Control Mode - * @description The control mode used - * @default balanced - * @enum {string} + * Name + * @description Name of the model. */ - control_mode?: "balanced" | "more_prompt" | "more_control" | "unbalanced"; + name: string; /** - * Resize Mode - * @description The resize mode used - * @default just_resize - * @enum {string} + * Description + * @description Model description */ - resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple"; + description: string | null; /** - * type - * @default controlnet - * @constant + * Source + * @description The original source of the model (path, URL or repo_id). 
*/ - type: "controlnet"; - }; - /** ControlNetMetadataField */ - ControlNetMetadataField: { - /** @description The control image */ - image: components["schemas"]["ImageField"]; + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; /** - * @description The control image, after processing. - * @default null + * Source Api Response + * @description The original API response from the source, as stringified JSON. */ - processed_image?: components["schemas"]["ImageField"] | null; - /** @description The ControlNet model to use */ - control_model: components["schemas"]["ModelIdentifierField"]; + source_api_response: string | null; /** - * Control Weight - * @description The weight given to the ControlNet - * @default 1 + * Cover Image + * @description Url for image to preview model */ - control_weight?: number | number[]; + cover_image: string | null; /** - * Begin Step Percent - * @description When the ControlNet is first applied (% of total steps) - * @default 0 + * Usage Info + * @description Usage information for this model */ - begin_step_percent?: number; + usage_info: string | null; /** - * End Step Percent - * @description When the ControlNet is last applied (% of total steps) - * @default 1 + * Config Path + * @description Path to the config for this model, if any. */ - end_step_percent?: number; + config_path: string | null; /** - * Control Mode - * @description The control mode to use - * @default balanced - * @enum {string} + * Type + * @default controlnet + * @constant */ - control_mode?: "balanced" | "more_prompt" | "more_control" | "unbalanced"; + type: "controlnet"; /** - * Resize Mode - * @description The resize mode to use - * @default just_resize - * @enum {string} + * Format + * @default checkpoint + * @constant */ - resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple"; - }; - /** - * ControlOutput - * @description node output for ControlNet info - */ - ControlOutput: { - /** @description ControlNet(s) to apply */ - control: components["schemas"]["ControlField"]; + format: "checkpoint"; + default_settings: components["schemas"]["ControlAdapterDefaultSettings"] | null; /** - * type - * @default control_output + * Base + * @default sd-2 * @constant */ - type: "control_output"; + base: "sd-2"; }; - /** - * Core Metadata - * @description Used internally by Invoke to collect metadata for generations. - */ - CoreMetadataInvocation: { + /** ControlNet_Checkpoint_SDXL_Config */ + ControlNet_Checkpoint_SDXL_Config: { /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Key + * @description A unique key for this model. */ - id: string; + key: string; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Hash + * @description The hash of the model file(s). */ - is_intermediate?: boolean; + hash: string; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. */ - use_cache?: boolean; + path: string; /** - * Generation Mode - * @description The generation mode that output this image - * @default null + * File Size + * @description The size of the model in bytes. 
*/ - generation_mode?: ("txt2img" | "img2img" | "inpaint" | "outpaint" | "sdxl_txt2img" | "sdxl_img2img" | "sdxl_inpaint" | "sdxl_outpaint" | "flux_txt2img" | "flux_img2img" | "flux_inpaint" | "flux_outpaint" | "sd3_txt2img" | "sd3_img2img" | "sd3_inpaint" | "sd3_outpaint" | "cogview4_txt2img" | "cogview4_img2img" | "cogview4_inpaint" | "cogview4_outpaint") | null; + file_size: number; /** - * Positive Prompt - * @description The positive prompt parameter - * @default null + * Name + * @description Name of the model. */ - positive_prompt?: string | null; + name: string; /** - * Negative Prompt - * @description The negative prompt parameter - * @default null + * Description + * @description Model description */ - negative_prompt?: string | null; + description: string | null; /** - * Width - * @description The width parameter - * @default null + * Source + * @description The original source of the model (path, URL or repo_id). */ - width?: number | null; + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; /** - * Height - * @description The height parameter - * @default null + * Source Api Response + * @description The original API response from the source, as stringified JSON. */ - height?: number | null; + source_api_response: string | null; /** - * Seed - * @description The seed used for noise generation - * @default null + * Cover Image + * @description Url for image to preview model */ - seed?: number | null; + cover_image: string | null; /** - * Rand Device - * @description The device used for random number generation - * @default null + * Usage Info + * @description Usage information for this model */ - rand_device?: string | null; + usage_info: string | null; /** - * Cfg Scale - * @description The classifier-free guidance scale parameter - * @default null + * Config Path + * @description Path to the config for this model, if any. */ - cfg_scale?: number | null; + config_path: string | null; /** - * Cfg Rescale Multiplier - * @description Rescale multiplier for CFG guidance, used for models trained with zero-terminal SNR - * @default null + * Type + * @default controlnet + * @constant */ - cfg_rescale_multiplier?: number | null; + type: "controlnet"; /** - * Steps - * @description The number of steps used for inference - * @default null + * Format + * @default checkpoint + * @constant */ - steps?: number | null; + format: "checkpoint"; + default_settings: components["schemas"]["ControlAdapterDefaultSettings"] | null; /** - * Scheduler - * @description The scheduler used for inference - * @default null + * Base + * @default sdxl + * @constant */ - scheduler?: string | null; + base: "sdxl"; + }; + /** ControlNet_Diffusers_FLUX_Config */ + ControlNet_Diffusers_FLUX_Config: { /** - * Seamless X - * @description Whether seamless tiling was used on the X axis - * @default null + * Key + * @description A unique key for this model. */ - seamless_x?: boolean | null; + key: string; /** - * Seamless Y - * @description Whether seamless tiling was used on the Y axis - * @default null + * Hash + * @description The hash of the model file(s). */ - seamless_y?: boolean | null; + hash: string; /** - * Clip Skip - * @description The number of skipped CLIP layers - * @default null + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. 
*/ - clip_skip?: number | null; + path: string; /** - * @description The main model used for inference - * @default null + * File Size + * @description The size of the model in bytes. */ - model?: components["schemas"]["ModelIdentifierField"] | null; + file_size: number; /** - * Controlnets - * @description The ControlNets used for inference - * @default null + * Name + * @description Name of the model. */ - controlnets?: components["schemas"]["ControlNetMetadataField"][] | null; + name: string; /** - * Ipadapters - * @description The IP Adapters used for inference - * @default null + * Description + * @description Model description */ - ipAdapters?: components["schemas"]["IPAdapterMetadataField"][] | null; + description: string | null; /** - * T2Iadapters - * @description The IP Adapters used for inference - * @default null + * Source + * @description The original source of the model (path, URL or repo_id). */ - t2iAdapters?: components["schemas"]["T2IAdapterMetadataField"][] | null; + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; /** - * Loras - * @description The LoRAs used for inference - * @default null + * Source Api Response + * @description The original API response from the source, as stringified JSON. */ - loras?: components["schemas"]["LoRAMetadataField"][] | null; + source_api_response: string | null; /** - * Strength - * @description The strength used for latents-to-latents - * @default null + * Cover Image + * @description Url for image to preview model */ - strength?: number | null; + cover_image: string | null; /** - * Init Image - * @description The name of the initial image - * @default null + * Usage Info + * @description Usage information for this model */ - init_image?: string | null; + usage_info: string | null; /** - * @description The VAE used for decoding, if the main model's default was not used - * @default null + * Format + * @default diffusers + * @constant */ - vae?: components["schemas"]["ModelIdentifierField"] | null; + format: "diffusers"; + /** @default */ + repo_variant: components["schemas"]["ModelRepoVariant"]; /** - * Hrf Enabled - * @description Whether or not high resolution fix was enabled. - * @default null + * Type + * @default controlnet + * @constant */ - hrf_enabled?: boolean | null; + type: "controlnet"; + default_settings: components["schemas"]["ControlAdapterDefaultSettings"] | null; /** - * Hrf Method - * @description The high resolution fix upscale method. - * @default null + * Base + * @default flux + * @constant */ - hrf_method?: string | null; + base: "flux"; + }; + /** ControlNet_Diffusers_SD1_Config */ + ControlNet_Diffusers_SD1_Config: { /** - * Hrf Strength - * @description The high resolution fix img2img strength used in the upscale pass. - * @default null + * Key + * @description A unique key for this model. */ - hrf_strength?: number | null; + key: string; /** - * Positive Style Prompt - * @description The positive style prompt parameter - * @default null + * Hash + * @description The hash of the model file(s). */ - positive_style_prompt?: string | null; + hash: string; /** - * Negative Style Prompt - * @description The negative style prompt parameter - * @default null + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. 
*/ - negative_style_prompt?: string | null; + path: string; /** - * @description The SDXL Refiner model used - * @default null + * File Size + * @description The size of the model in bytes. */ - refiner_model?: components["schemas"]["ModelIdentifierField"] | null; + file_size: number; /** - * Refiner Cfg Scale - * @description The classifier-free guidance scale parameter used for the refiner - * @default null + * Name + * @description Name of the model. */ - refiner_cfg_scale?: number | null; + name: string; /** - * Refiner Steps - * @description The number of steps used for the refiner - * @default null + * Description + * @description Model description */ - refiner_steps?: number | null; + description: string | null; /** - * Refiner Scheduler - * @description The scheduler used for the refiner - * @default null + * Source + * @description The original source of the model (path, URL or repo_id). */ - refiner_scheduler?: string | null; + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; /** - * Refiner Positive Aesthetic Score - * @description The aesthetic score used for the refiner - * @default null + * Source Api Response + * @description The original API response from the source, as stringified JSON. */ - refiner_positive_aesthetic_score?: number | null; + source_api_response: string | null; /** - * Refiner Negative Aesthetic Score - * @description The aesthetic score used for the refiner - * @default null + * Cover Image + * @description Url for image to preview model */ - refiner_negative_aesthetic_score?: number | null; + cover_image: string | null; /** - * Refiner Start - * @description The start value used for refiner denoising - * @default null + * Usage Info + * @description Usage information for this model */ - refiner_start?: number | null; + usage_info: string | null; /** - * type - * @default core_metadata + * Format + * @default diffusers * @constant */ - type: "core_metadata"; - } & { - [key: string]: unknown; - }; - /** - * Create Denoise Mask - * @description Creates mask for denoising model run. - */ - CreateDenoiseMaskInvocation: { + format: "diffusers"; + /** @default */ + repo_variant: components["schemas"]["ModelRepoVariant"]; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Type + * @default controlnet + * @constant */ - id: string; + type: "controlnet"; + default_settings: components["schemas"]["ControlAdapterDefaultSettings"] | null; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Base + * @default sd-1 + * @constant */ - is_intermediate?: boolean; + base: "sd-1"; + }; + /** ControlNet_Diffusers_SD2_Config */ + ControlNet_Diffusers_SD2_Config: { /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Key + * @description A unique key for this model. */ - use_cache?: boolean; + key: string; /** - * @description VAE - * @default null + * Hash + * @description The hash of the model file(s). */ - vae?: components["schemas"]["VAEField"] | null; + hash: string; /** - * @description Image which will be masked - * @default null + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. 
*/ - image?: components["schemas"]["ImageField"] | null; + path: string; /** - * @description The mask to use when pasting - * @default null + * File Size + * @description The size of the model in bytes. */ - mask?: components["schemas"]["ImageField"] | null; + file_size: number; /** - * Tiled - * @description Processing using overlapping tiles (reduce memory consumption) - * @default false + * Name + * @description Name of the model. */ - tiled?: boolean; + name: string; /** - * Fp32 - * @description Whether or not to use full float32 precision - * @default false + * Description + * @description Model description */ - fp32?: boolean; + description: string | null; /** - * type - * @default create_denoise_mask - * @constant + * Source + * @description The original source of the model (path, URL or repo_id). */ - type: "create_denoise_mask"; - }; - /** - * Create Gradient Mask - * @description Creates mask for denoising. - */ - CreateGradientMaskInvocation: { + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Source Api Response + * @description The original API response from the source, as stringified JSON. */ - id: string; + source_api_response: string | null; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Cover Image + * @description Url for image to preview model */ - is_intermediate?: boolean; + cover_image: string | null; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Usage Info + * @description Usage information for this model */ - use_cache?: boolean; + usage_info: string | null; /** - * @description Image which will be masked - * @default null - */ - mask?: components["schemas"]["ImageField"] | null; - /** - * Edge Radius - * @description How far to expand the edges of the mask - * @default 16 + * Format + * @default diffusers + * @constant */ - edge_radius?: number; + format: "diffusers"; + /** @default */ + repo_variant: components["schemas"]["ModelRepoVariant"]; /** - * Coherence Mode - * @default Gaussian Blur - * @enum {string} + * Type + * @default controlnet + * @constant */ - coherence_mode?: "Gaussian Blur" | "Box Blur" | "Staged"; + type: "controlnet"; + default_settings: components["schemas"]["ControlAdapterDefaultSettings"] | null; /** - * Minimum Denoise - * @description Minimum denoise level for the coherence region - * @default 0 + * Base + * @default sd-2 + * @constant */ - minimum_denoise?: number; + base: "sd-2"; + }; + /** ControlNet_Diffusers_SDXL_Config */ + ControlNet_Diffusers_SDXL_Config: { /** - * [OPTIONAL] Image - * @description OPTIONAL: Only connect for specialized Inpainting models, masked_latents will be generated from the image with the VAE - * @default null + * Key + * @description A unique key for this model. */ - image?: components["schemas"]["ImageField"] | null; + key: string; /** - * [OPTIONAL] UNet - * @description OPTIONAL: If the Unet is a specialized Inpainting model, masked_latents will be generated from the image with the VAE - * @default null + * Hash + * @description The hash of the model file(s). 
*/ - unet?: components["schemas"]["UNetField"] | null; + hash: string; /** - * [OPTIONAL] VAE - * @description OPTIONAL: Only connect for specialized Inpainting models, masked_latents will be generated from the image with the VAE - * @default null + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. */ - vae?: components["schemas"]["VAEField"] | null; + path: string; /** - * Tiled - * @description Processing using overlapping tiles (reduce memory consumption) - * @default false + * File Size + * @description The size of the model in bytes. */ - tiled?: boolean; + file_size: number; /** - * Fp32 - * @description Whether or not to use full float32 precision - * @default false + * Name + * @description Name of the model. */ - fp32?: boolean; + name: string; /** - * type - * @default create_gradient_mask - * @constant + * Description + * @description Model description */ - type: "create_gradient_mask"; - }; - /** - * Crop Image to Bounding Box - * @description Crop an image to the given bounding box. If the bounding box is omitted, the image is cropped to the non-transparent pixels. - */ - CropImageToBoundingBoxInvocation: { + description: string | null; /** - * @description The board to save the image to - * @default null + * Source + * @description The original source of the model (path, URL or repo_id). */ - board?: components["schemas"]["BoardField"] | null; + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; /** - * @description Optional metadata to be saved with the image - * @default null + * Source Api Response + * @description The original API response from the source, as stringified JSON. */ - metadata?: components["schemas"]["MetadataField"] | null; + source_api_response: string | null; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Cover Image + * @description Url for image to preview model */ - id: string; + cover_image: string | null; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Usage Info + * @description Usage information for this model */ - is_intermediate?: boolean; + usage_info: string | null; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Format + * @default diffusers + * @constant */ - use_cache?: boolean; + format: "diffusers"; + /** @default */ + repo_variant: components["schemas"]["ModelRepoVariant"]; /** - * @description The image to crop - * @default null + * Type + * @default controlnet + * @constant */ - image?: components["schemas"]["ImageField"] | null; + type: "controlnet"; + default_settings: components["schemas"]["ControlAdapterDefaultSettings"] | null; /** - * @description The bounding box to crop the image to - * @default null + * Base + * @default sdxl + * @constant */ - bounding_box?: components["schemas"]["BoundingBoxField"] | null; + base: "sdxl"; + }; + /** + * ControlOutput + * @description node output for ControlNet info + */ + ControlOutput: { + /** @description ControlNet(s) to apply */ + control: components["schemas"]["ControlField"]; /** * type - * @default crop_image_to_bounding_box + * @default control_output * @constant */ - type: "crop_image_to_bounding_box"; + type: "control_output"; }; /** - * Crop Latents - * @description Crops a latent-space tensor to a box specified in image-space. 
The box dimensions and coordinates must be - * divisible by the latent scale factor of 8. + * Core Metadata + * @description Used internally by Invoke to collect metadata for generations. */ - CropLatentsCoreInvocation: { + CoreMetadataInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -6151,220 +5984,220 @@ export type components = { */ use_cache?: boolean; /** - * @description Latents tensor + * Generation Mode + * @description The generation mode that output this image * @default null */ - latents?: components["schemas"]["LatentsField"] | null; + generation_mode?: ("txt2img" | "img2img" | "inpaint" | "outpaint" | "sdxl_txt2img" | "sdxl_img2img" | "sdxl_inpaint" | "sdxl_outpaint" | "flux_txt2img" | "flux_img2img" | "flux_inpaint" | "flux_outpaint" | "sd3_txt2img" | "sd3_img2img" | "sd3_inpaint" | "sd3_outpaint" | "cogview4_txt2img" | "cogview4_img2img" | "cogview4_inpaint" | "cogview4_outpaint") | null; /** - * X - * @description The left x coordinate (in px) of the crop rectangle in image space. This value will be converted to a dimension in latent space. + * Positive Prompt + * @description The positive prompt parameter * @default null */ - x?: number | null; + positive_prompt?: string | null; /** - * Y - * @description The top y coordinate (in px) of the crop rectangle in image space. This value will be converted to a dimension in latent space. + * Negative Prompt + * @description The negative prompt parameter * @default null */ - y?: number | null; + negative_prompt?: string | null; /** * Width - * @description The width (in px) of the crop rectangle in image space. This value will be converted to a dimension in latent space. + * @description The width parameter * @default null */ width?: number | null; /** * Height - * @description The height (in px) of the crop rectangle in image space. This value will be converted to a dimension in latent space. + * @description The height parameter * @default null */ height?: number | null; /** - * type - * @default crop_latents - * @constant + * Seed + * @description The seed used for noise generation + * @default null */ - type: "crop_latents"; - }; - /** - * OpenCV Inpaint - * @description Simple inpaint using opencv. - */ - CvInpaintInvocation: { + seed?: number | null; /** - * @description The board to save the image to + * Rand Device + * @description The device used for random number generation * @default null */ - board?: components["schemas"]["BoardField"] | null; + rand_device?: string | null; /** - * @description Optional metadata to be saved with the image + * Cfg Scale + * @description The classifier-free guidance scale parameter * @default null */ - metadata?: components["schemas"]["MetadataField"] | null; + cfg_scale?: number | null; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Cfg Rescale Multiplier + * @description Rescale multiplier for CFG guidance, used for models trained with zero-terminal SNR + * @default null */ - id: string; + cfg_rescale_multiplier?: number | null; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. 
- * @default false + * Steps + * @description The number of steps used for inference + * @default null */ - is_intermediate?: boolean; + steps?: number | null; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Scheduler + * @description The scheduler used for inference + * @default null */ - use_cache?: boolean; + scheduler?: string | null; /** - * @description The image to inpaint + * Seamless X + * @description Whether seamless tiling was used on the X axis * @default null */ - image?: components["schemas"]["ImageField"] | null; + seamless_x?: boolean | null; /** - * @description The mask to use when inpainting + * Seamless Y + * @description Whether seamless tiling was used on the Y axis * @default null */ - mask?: components["schemas"]["ImageField"] | null; + seamless_y?: boolean | null; /** - * type - * @default cv_inpaint - * @constant + * Clip Skip + * @description The number of skipped CLIP layers + * @default null */ - type: "cv_inpaint"; - }; - /** - * DW Openpose Detection - * @description Generates an openpose pose from an image using DWPose - */ - DWOpenposeDetectionInvocation: { + clip_skip?: number | null; /** - * @description The board to save the image to + * @description The main model used for inference * @default null */ - board?: components["schemas"]["BoardField"] | null; + model?: components["schemas"]["ModelIdentifierField"] | null; /** - * @description Optional metadata to be saved with the image + * Controlnets + * @description The ControlNets used for inference * @default null */ - metadata?: components["schemas"]["MetadataField"] | null; + controlnets?: components["schemas"]["ControlNetMetadataField"][] | null; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Ipadapters + * @description The IP Adapters used for inference + * @default null */ - id: string; + ipAdapters?: components["schemas"]["IPAdapterMetadataField"][] | null; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * T2Iadapters + * @description The IP Adapters used for inference + * @default null */ - is_intermediate?: boolean; + t2iAdapters?: components["schemas"]["T2IAdapterMetadataField"][] | null; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Loras + * @description The LoRAs used for inference + * @default null */ - use_cache?: boolean; + loras?: components["schemas"]["LoRAMetadataField"][] | null; /** - * @description The image to process + * Strength + * @description The strength used for latents-to-latents * @default null */ - image?: components["schemas"]["ImageField"] | null; + strength?: number | null; /** - * Draw Body - * @default true + * Init Image + * @description The name of the initial image + * @default null */ - draw_body?: boolean; + init_image?: string | null; /** - * Draw Face - * @default false + * @description The VAE used for decoding, if the main model's default was not used + * @default null */ - draw_face?: boolean; + vae?: components["schemas"]["ModelIdentifierField"] | null; /** - * Draw Hands - * @default false + * Hrf Enabled + * @description Whether or not high resolution fix was enabled. + * @default null */ - draw_hands?: boolean; + hrf_enabled?: boolean | null; /** - * type - * @default dw_openpose_detection - * @constant + * Hrf Method + * @description The high resolution fix upscale method. 
+ * @default null */ - type: "dw_openpose_detection"; - }; - /** - * DeleteAllExceptCurrentResult - * @description Result of deleting all except current - */ - DeleteAllExceptCurrentResult: { + hrf_method?: string | null; /** - * Deleted - * @description Number of queue items deleted + * Hrf Strength + * @description The high resolution fix img2img strength used in the upscale pass. + * @default null */ - deleted: number; - }; - /** DeleteBoardResult */ - DeleteBoardResult: { + hrf_strength?: number | null; /** - * Board Id - * @description The id of the board that was deleted. + * Positive Style Prompt + * @description The positive style prompt parameter + * @default null */ - board_id: string; + positive_style_prompt?: string | null; /** - * Deleted Board Images - * @description The image names of the board-images relationships that were deleted. + * Negative Style Prompt + * @description The negative style prompt parameter + * @default null */ - deleted_board_images: string[]; + negative_style_prompt?: string | null; /** - * Deleted Images - * @description The names of the images that were deleted. + * @description The SDXL Refiner model used + * @default null */ - deleted_images: string[]; - }; - /** - * DeleteByDestinationResult - * @description Result of deleting by a destination - */ - DeleteByDestinationResult: { + refiner_model?: components["schemas"]["ModelIdentifierField"] | null; /** - * Deleted - * @description Number of queue items deleted + * Refiner Cfg Scale + * @description The classifier-free guidance scale parameter used for the refiner + * @default null */ - deleted: number; - }; - /** DeleteImagesResult */ - DeleteImagesResult: { + refiner_cfg_scale?: number | null; /** - * Affected Boards - * @description The ids of boards affected by the delete operation + * Refiner Steps + * @description The number of steps used for the refiner + * @default null */ - affected_boards: string[]; + refiner_steps?: number | null; /** - * Deleted Images - * @description The names of the images that were deleted + * Refiner Scheduler + * @description The scheduler used for the refiner + * @default null */ - deleted_images: string[]; - }; - /** DeleteVideosResult */ - DeleteVideosResult: { + refiner_scheduler?: string | null; /** - * Affected Boards - * @description The ids of boards affected by the delete operation + * Refiner Positive Aesthetic Score + * @description The aesthetic score used for the refiner + * @default null */ - affected_boards: string[]; + refiner_positive_aesthetic_score?: number | null; /** - * Deleted Videos - * @description The ids of the videos that were deleted + * Refiner Negative Aesthetic Score + * @description The aesthetic score used for the refiner + * @default null */ - deleted_videos: string[]; + refiner_negative_aesthetic_score?: number | null; + /** + * Refiner Start + * @description The start value used for refiner denoising + * @default null + */ + refiner_start?: number | null; + /** + * type + * @default core_metadata + * @constant + */ + type: "core_metadata"; + } & { + [key: string]: unknown; }; /** - * Denoise - SD1.5, SDXL - * @description Denoises noisy latents to decodable images + * Create Denoise Mask + * @description Creates mask for denoising model run. */ - DenoiseLatentsInvocation: { + CreateDenoiseMaskInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
@@ -6383,106 +6216,44 @@ export type components = { */ use_cache?: boolean; /** - * Positive Conditioning - * @description Positive conditioning tensor - * @default null - */ - positive_conditioning?: components["schemas"]["ConditioningField"] | components["schemas"]["ConditioningField"][] | null; - /** - * Negative Conditioning - * @description Negative conditioning tensor - * @default null - */ - negative_conditioning?: components["schemas"]["ConditioningField"] | components["schemas"]["ConditioningField"][] | null; - /** - * @description Noise tensor - * @default null - */ - noise?: components["schemas"]["LatentsField"] | null; - /** - * Steps - * @description Number of steps to run - * @default 10 - */ - steps?: number; - /** - * CFG Scale - * @description Classifier-Free Guidance scale - * @default 7.5 - */ - cfg_scale?: number | number[]; - /** - * Denoising Start - * @description When to start denoising, expressed a percentage of total steps - * @default 0 - */ - denoising_start?: number; - /** - * Denoising End - * @description When to stop denoising, expressed a percentage of total steps - * @default 1 - */ - denoising_end?: number; - /** - * Scheduler - * @description Scheduler to use during inference - * @default euler - * @enum {string} - */ - scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_3m" | "dpmpp_3m_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd"; - /** - * UNet - * @description UNet (scheduler, LoRAs) - * @default null - */ - unet?: components["schemas"]["UNetField"] | null; - /** - * Control + * @description VAE * @default null */ - control?: components["schemas"]["ControlField"] | components["schemas"]["ControlField"][] | null; + vae?: components["schemas"]["VAEField"] | null; /** - * IP-Adapter - * @description IP-Adapter to apply + * @description Image which will be masked * @default null */ - ip_adapter?: components["schemas"]["IPAdapterField"] | components["schemas"]["IPAdapterField"][] | null; + image?: components["schemas"]["ImageField"] | null; /** - * T2I-Adapter - * @description T2I-Adapter(s) to apply + * @description The mask to use when pasting * @default null */ - t2i_adapter?: components["schemas"]["T2IAdapterField"] | components["schemas"]["T2IAdapterField"][] | null; - /** - * CFG Rescale Multiplier - * @description Rescale multiplier for CFG guidance, used for models trained with zero-terminal SNR - * @default 0 - */ - cfg_rescale_multiplier?: number; + mask?: components["schemas"]["ImageField"] | null; /** - * @description Latents tensor - * @default null + * Tiled + * @description Processing using overlapping tiles (reduce memory consumption) + * @default false */ - latents?: components["schemas"]["LatentsField"] | null; + tiled?: boolean; /** - * @description A mask of the region to apply the denoising process to. Values of 0.0 represent the regions to be fully denoised, and 1.0 represent the regions to be preserved. 
- * @default null + * Fp32 + * @description Whether or not to use full float32 precision + * @default false */ - denoise_mask?: components["schemas"]["DenoiseMaskField"] | null; + fp32?: boolean; /** * type - * @default denoise_latents + * @default create_denoise_mask * @constant */ - type: "denoise_latents"; + type: "create_denoise_mask"; }; - /** Denoise - SD1.5, SDXL + Metadata */ - DenoiseLatentsMetaInvocation: { - /** - * @description Optional metadata to be saved with the image - * @default null - */ - metadata?: components["schemas"]["MetadataField"] | null; + /** + * Create Gradient Mask + * @description Creates mask for denoising. + */ + CreateGradientMaskInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -6501,141 +6272,178 @@ export type components = { */ use_cache?: boolean; /** - * Positive Conditioning - * @description Positive conditioning tensor + * @description Image which will be masked * @default null */ - positive_conditioning?: components["schemas"]["ConditioningField"] | components["schemas"]["ConditioningField"][] | null; + mask?: components["schemas"]["ImageField"] | null; /** - * Negative Conditioning - * @description Negative conditioning tensor - * @default null - */ - negative_conditioning?: components["schemas"]["ConditioningField"] | components["schemas"]["ConditioningField"][] | null; + * Edge Radius + * @description How far to expand the edges of the mask + * @default 16 + */ + edge_radius?: number; /** - * @description Noise tensor + * Coherence Mode + * @default Gaussian Blur + * @enum {string} + */ + coherence_mode?: "Gaussian Blur" | "Box Blur" | "Staged"; + /** + * Minimum Denoise + * @description Minimum denoise level for the coherence region + * @default 0 + */ + minimum_denoise?: number; + /** + * [OPTIONAL] Image + * @description OPTIONAL: Only connect for specialized Inpainting models, masked_latents will be generated from the image with the VAE * @default null */ - noise?: components["schemas"]["LatentsField"] | null; + image?: components["schemas"]["ImageField"] | null; /** - * Steps - * @description Number of steps to run - * @default 10 + * [OPTIONAL] UNet + * @description OPTIONAL: If the Unet is a specialized Inpainting model, masked_latents will be generated from the image with the VAE + * @default null */ - steps?: number; + unet?: components["schemas"]["UNetField"] | null; /** - * CFG Scale - * @description Classifier-Free Guidance scale - * @default 7.5 + * [OPTIONAL] VAE + * @description OPTIONAL: Only connect for specialized Inpainting models, masked_latents will be generated from the image with the VAE + * @default null */ - cfg_scale?: number | number[]; + vae?: components["schemas"]["VAEField"] | null; /** - * Denoising Start - * @description When to start denoising, expressed a percentage of total steps - * @default 0 + * Tiled + * @description Processing using overlapping tiles (reduce memory consumption) + * @default false */ - denoising_start?: number; + tiled?: boolean; /** - * Denoising End - * @description When to stop denoising, expressed a percentage of total steps - * @default 1 + * Fp32 + * @description Whether or not to use full float32 precision + * @default false */ - denoising_end?: number; + fp32?: boolean; /** - * Scheduler - * @description Scheduler to use during inference - * @default euler - * @enum {string} + * type + * @default create_gradient_mask + * @constant */ - scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | 
"lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_3m" | "dpmpp_3m_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd"; + type: "create_gradient_mask"; + }; + /** + * Crop Image to Bounding Box + * @description Crop an image to the given bounding box. If the bounding box is omitted, the image is cropped to the non-transparent pixels. + */ + CropImageToBoundingBoxInvocation: { /** - * UNet - * @description UNet (scheduler, LoRAs) + * @description The board to save the image to * @default null */ - unet?: components["schemas"]["UNetField"] | null; + board?: components["schemas"]["BoardField"] | null; /** - * Control + * @description Optional metadata to be saved with the image * @default null */ - control?: components["schemas"]["ControlField"] | components["schemas"]["ControlField"][] | null; + metadata?: components["schemas"]["MetadataField"] | null; /** - * IP-Adapter - * @description IP-Adapter to apply - * @default null + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - ip_adapter?: components["schemas"]["IPAdapterField"] | components["schemas"]["IPAdapterField"][] | null; + id: string; /** - * T2I-Adapter - * @description T2I-Adapter(s) to apply - * @default null + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - t2i_adapter?: components["schemas"]["T2IAdapterField"] | components["schemas"]["T2IAdapterField"][] | null; + is_intermediate?: boolean; /** - * CFG Rescale Multiplier - * @description Rescale multiplier for CFG guidance, used for models trained with zero-terminal SNR - * @default 0 + * Use Cache + * @description Whether or not to use the cache + * @default true */ - cfg_rescale_multiplier?: number; + use_cache?: boolean; /** - * @description Latents tensor + * @description The image to crop * @default null */ - latents?: components["schemas"]["LatentsField"] | null; + image?: components["schemas"]["ImageField"] | null; /** - * @description A mask of the region to apply the denoising process to. Values of 0.0 represent the regions to be fully denoised, and 1.0 represent the regions to be preserved. + * @description The bounding box to crop the image to * @default null */ - denoise_mask?: components["schemas"]["DenoiseMaskField"] | null; + bounding_box?: components["schemas"]["BoundingBoxField"] | null; /** * type - * @default denoise_latents_meta + * @default crop_image_to_bounding_box * @constant */ - type: "denoise_latents_meta"; + type: "crop_image_to_bounding_box"; }; /** - * DenoiseMaskField - * @description An inpaint mask field + * Crop Latents + * @description Crops a latent-space tensor to a box specified in image-space. The box dimensions and coordinates must be + * divisible by the latent scale factor of 8. */ - DenoiseMaskField: { + CropLatentsCoreInvocation: { /** - * Mask Name - * @description The name of the mask image + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - mask_name: string; + id: string; /** - * Masked Latents Name - * @description The name of the masked image latents + * Is Intermediate + * @description Whether or not this is an intermediate invocation. 
+ * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * @description Latents tensor * @default null */ - masked_latents_name?: string | null; + latents?: components["schemas"]["LatentsField"] | null; /** - * Gradient - * @description Used for gradient inpainting - * @default false + * X + * @description The left x coordinate (in px) of the crop rectangle in image space. This value will be converted to a dimension in latent space. + * @default null */ - gradient?: boolean; - }; - /** - * DenoiseMaskOutput - * @description Base class for nodes that output a single image - */ - DenoiseMaskOutput: { - /** @description Mask for denoise model run */ - denoise_mask: components["schemas"]["DenoiseMaskField"]; + x?: number | null; + /** + * Y + * @description The top y coordinate (in px) of the crop rectangle in image space. This value will be converted to a dimension in latent space. + * @default null + */ + y?: number | null; + /** + * Width + * @description The width (in px) of the crop rectangle in image space. This value will be converted to a dimension in latent space. + * @default null + */ + width?: number | null; + /** + * Height + * @description The height (in px) of the crop rectangle in image space. This value will be converted to a dimension in latent space. + * @default null + */ + height?: number | null; /** * type - * @default denoise_mask_output + * @default crop_latents * @constant */ - type: "denoise_mask_output"; + type: "crop_latents"; }; /** - * Depth Anything Depth Estimation - * @description Generates a depth map using a Depth Anything model. + * OpenCV Inpaint + * @description Simple inpaint using opencv. */ - DepthAnythingDepthEstimationInvocation: { + CvInpaintInvocation: { /** * @description The board to save the image to * @default null @@ -6664,29 +6472,37 @@ export type components = { */ use_cache?: boolean; /** - * @description The image to process + * @description The image to inpaint * @default null */ image?: components["schemas"]["ImageField"] | null; /** - * Model Size - * @description The size of the depth model to use - * @default small_v2 - * @enum {string} + * @description The mask to use when inpainting + * @default null */ - model_size?: "large" | "base" | "small" | "small_v2"; + mask?: components["schemas"]["ImageField"] | null; /** * type - * @default depth_anything_depth_estimation + * @default cv_inpaint * @constant */ - type: "depth_anything_depth_estimation"; + type: "cv_inpaint"; }; /** - * Divide Integers - * @description Divides two numbers + * DW Openpose Detection + * @description Generates an openpose pose from an image using DWPose */ - DivideInvocation: { + DWOpenposeDetectionInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
@@ -6705,401 +6521,374 @@ export type components = { */ use_cache?: boolean; /** - * A - * @description The first number - * @default 0 + * @description The image to process + * @default null */ - a?: number; + image?: components["schemas"]["ImageField"] | null; /** - * B - * @description The second number - * @default 0 + * Draw Body + * @default true */ - b?: number; + draw_body?: boolean; + /** + * Draw Face + * @default false + */ + draw_face?: boolean; + /** + * Draw Hands + * @default false + */ + draw_hands?: boolean; /** * type - * @default div + * @default dw_openpose_detection * @constant */ - type: "div"; + type: "dw_openpose_detection"; }; /** - * DownloadCancelledEvent - * @description Event model for download_cancelled + * DeleteAllExceptCurrentResult + * @description Result of deleting all except current */ - DownloadCancelledEvent: { + DeleteAllExceptCurrentResult: { /** - * Timestamp - * @description The timestamp of the event + * Deleted + * @description Number of queue items deleted */ - timestamp: number; - /** - * Source - * @description The source of the download - */ - source: string; + deleted: number; }; - /** - * DownloadCompleteEvent - * @description Event model for download_complete - */ - DownloadCompleteEvent: { - /** - * Timestamp - * @description The timestamp of the event - */ - timestamp: number; + /** DeleteBoardResult */ + DeleteBoardResult: { /** - * Source - * @description The source of the download + * Board Id + * @description The id of the board that was deleted. */ - source: string; + board_id: string; /** - * Download Path - * @description The local path where the download is saved + * Deleted Board Images + * @description The image names of the board-images relationships that were deleted. */ - download_path: string; + deleted_board_images: string[]; /** - * Total Bytes - * @description The total number of bytes downloaded + * Deleted Images + * @description The names of the images that were deleted. */ - total_bytes: number; + deleted_images: string[]; }; /** - * DownloadErrorEvent - * @description Event model for download_error + * DeleteByDestinationResult + * @description Result of deleting by a destination */ - DownloadErrorEvent: { + DeleteByDestinationResult: { /** - * Timestamp - * @description The timestamp of the event + * Deleted + * @description Number of queue items deleted */ - timestamp: number; + deleted: number; + }; + /** DeleteImagesResult */ + DeleteImagesResult: { /** - * Source - * @description The source of the download + * Affected Boards + * @description The ids of boards affected by the delete operation */ - source: string; + affected_boards: string[]; /** - * Error Type - * @description The type of error + * Deleted Images + * @description The names of the images that were deleted */ - error_type: string; + deleted_images: string[]; + }; + /** DeleteVideosResult */ + DeleteVideosResult: { /** - * Error - * @description The error message + * Affected Boards + * @description The ids of boards affected by the delete operation */ - error: string; + affected_boards: string[]; + /** + * Deleted Videos + * @description The ids of the videos that were deleted + */ + deleted_videos: string[]; }; /** - * DownloadJob - * @description Class to monitor and control a model download request. 
+ * Denoise - SD1.5, SDXL + * @description Denoises noisy latents to decodable images */ - DownloadJob: { + DenoiseLatentsInvocation: { /** * Id - * @description Numeric ID of this job - * @default -1 + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - id?: number; + id: string; /** - * Dest - * Format: path - * @description Initial destination of downloaded model on local disk; a directory or file path + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - dest: string; + is_intermediate?: boolean; /** - * Download Path - * @description Final location of downloaded file or directory + * Use Cache + * @description Whether or not to use the cache + * @default true */ - download_path?: string | null; + use_cache?: boolean; /** - * @description Status of the download - * @default waiting + * Positive Conditioning + * @description Positive conditioning tensor + * @default null */ - status?: components["schemas"]["DownloadJobStatus"]; + positive_conditioning?: components["schemas"]["ConditioningField"] | components["schemas"]["ConditioningField"][] | null; /** - * Bytes - * @description Bytes downloaded so far - * @default 0 + * Negative Conditioning + * @description Negative conditioning tensor + * @default null */ - bytes?: number; + negative_conditioning?: components["schemas"]["ConditioningField"] | components["schemas"]["ConditioningField"][] | null; /** - * Total Bytes - * @description Total file size (bytes) - * @default 0 + * @description Noise tensor + * @default null */ - total_bytes?: number; + noise?: components["schemas"]["LatentsField"] | null; /** - * Error Type - * @description Name of exception that caused an error + * Steps + * @description Number of steps to run + * @default 10 */ - error_type?: string | null; + steps?: number; /** - * Error - * @description Traceback of the exception that caused an error + * CFG Scale + * @description Classifier-Free Guidance scale + * @default 7.5 */ - error?: string | null; + cfg_scale?: number | number[]; /** - * Source - * Format: uri - * @description Where to download from. Specific types specified in child classes. 
+ * Denoising Start + * @description When to start denoising, expressed a percentage of total steps + * @default 0 */ - source: string; + denoising_start?: number; /** - * Access Token - * @description authorization token for protected resources + * Denoising End + * @description When to stop denoising, expressed a percentage of total steps + * @default 1 */ - access_token?: string | null; + denoising_end?: number; /** - * Priority - * @description Queue priority; lower values are higher priority - * @default 10 + * Scheduler + * @description Scheduler to use during inference + * @default euler + * @enum {string} */ - priority?: number; + scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_3m" | "dpmpp_3m_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd"; /** - * Job Started - * @description Timestamp for when the download job started + * UNet + * @description UNet (scheduler, LoRAs) + * @default null */ - job_started?: string | null; + unet?: components["schemas"]["UNetField"] | null; /** - * Job Ended - * @description Timestamp for when the download job ended (completed or errored) + * Control + * @default null */ - job_ended?: string | null; + control?: components["schemas"]["ControlField"] | components["schemas"]["ControlField"][] | null; /** - * Content Type - * @description Content type of downloaded file + * IP-Adapter + * @description IP-Adapter to apply + * @default null */ - content_type?: string | null; - }; - /** - * DownloadJobStatus - * @description State of a download job. - * @enum {string} - */ - DownloadJobStatus: "waiting" | "running" | "completed" | "cancelled" | "error"; - /** - * DownloadProgressEvent - * @description Event model for download_progress - */ - DownloadProgressEvent: { + ip_adapter?: components["schemas"]["IPAdapterField"] | components["schemas"]["IPAdapterField"][] | null; /** - * Timestamp - * @description The timestamp of the event + * T2I-Adapter + * @description T2I-Adapter(s) to apply + * @default null */ - timestamp: number; + t2i_adapter?: components["schemas"]["T2IAdapterField"] | components["schemas"]["T2IAdapterField"][] | null; /** - * Source - * @description The source of the download + * CFG Rescale Multiplier + * @description Rescale multiplier for CFG guidance, used for models trained with zero-terminal SNR + * @default 0 */ - source: string; + cfg_rescale_multiplier?: number; /** - * Download Path - * @description The local path where the download is saved + * @description Latents tensor + * @default null */ - download_path: string; + latents?: components["schemas"]["LatentsField"] | null; /** - * Current Bytes - * @description The number of bytes downloaded so far + * @description A mask of the region to apply the denoising process to. Values of 0.0 represent the regions to be fully denoised, and 1.0 represent the regions to be preserved.
+ * @default null */ - current_bytes: number; + denoise_mask?: components["schemas"]["DenoiseMaskField"] | null; /** - * Total Bytes - * @description The total number of bytes to be downloaded + * type + * @default denoise_latents + * @constant */ - total_bytes: number; + type: "denoise_latents"; }; - /** - * DownloadStartedEvent - * @description Event model for download_started - */ - DownloadStartedEvent: { + /** Denoise - SD1.5, SDXL + Metadata */ + DenoiseLatentsMetaInvocation: { /** - * Timestamp - * @description The timestamp of the event + * @description Optional metadata to be saved with the image + * @default null */ - timestamp: number; + metadata?: components["schemas"]["MetadataField"] | null; /** - * Source - * @description The source of the download + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - source: string; + id: string; /** - * Download Path - * @description The local path where the download is saved - */ - download_path: string; - }; - /** - * Dynamic Prompt - * @description Parses a prompt using adieyal/dynamicprompts' random or combinatorial generator - */ - DynamicPromptInvocation: { - /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. - */ - id: string; - /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ is_intermediate?: boolean; /** * Use Cache * @description Whether or not to use the cache - * @default false + * @default true */ use_cache?: boolean; /** - * Prompt - * @description The prompt to parse with dynamicprompts + * Positive Conditioning + * @description Positive conditioning tensor * @default null */ - prompt?: string | null; + positive_conditioning?: components["schemas"]["ConditioningField"] | components["schemas"]["ConditioningField"][] | null; /** - * Max Prompts - * @description The number of prompts to generate - * @default 1 + * Negative Conditioning + * @description Negative conditioning tensor + * @default null */ - max_prompts?: number; + negative_conditioning?: components["schemas"]["ConditioningField"] | components["schemas"]["ConditioningField"][] | null; /** - * Combinatorial - * @description Whether to use the combinatorial generator - * @default false + * @description Noise tensor + * @default null */ - combinatorial?: boolean; + noise?: components["schemas"]["LatentsField"] | null; /** - * type - * @default dynamic_prompt - * @constant + * Steps + * @description Number of steps to run + * @default 10 */ - type: "dynamic_prompt"; - }; - /** DynamicPromptsResponse */ - DynamicPromptsResponse: { - /** Prompts */ - prompts: string[]; - /** Error */ - error?: string | null; - }; - /** - * Upscale (RealESRGAN) - * @description Upscales an image using RealESRGAN. 
- */ - ESRGANInvocation: { + steps?: number; /** - * @description The board to save the image to - * @default null + * CFG Scale + * @description Classifier-Free Guidance scale + * @default 7.5 */ - board?: components["schemas"]["BoardField"] | null; + cfg_scale?: number | number[]; /** - * @description Optional metadata to be saved with the image - * @default null + * Denoising Start + * @description When to start denoising, expressed a percentage of total steps + * @default 0 */ - metadata?: components["schemas"]["MetadataField"] | null; + denoising_start?: number; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Denoising End + * @description When to stop denoising, expressed a percentage of total steps + * @default 1 */ - id: string; + denoising_end?: number; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Scheduler + * @description Scheduler to use during inference + * @default euler + * @enum {string} */ - is_intermediate?: boolean; + scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_3m" | "dpmpp_3m_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd"; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * UNet + * @description UNet (scheduler, LoRAs) + * @default null */ - use_cache?: boolean; + unet?: components["schemas"]["UNetField"] | null; /** - * @description The input image + * Control * @default null */ - image?: components["schemas"]["ImageField"] | null; + control?: components["schemas"]["ControlField"] | components["schemas"]["ControlField"][] | null; /** - * Model Name - * @description The Real-ESRGAN model to use - * @default RealESRGAN_x4plus.pth - * @enum {string} + * IP-Adapter + * @description IP-Adapter to apply + * @default null */ - model_name?: "RealESRGAN_x4plus.pth" | "RealESRGAN_x4plus_anime_6B.pth" | "ESRGAN_SRx4_DF2KOST_official-ff704c30.pth" | "RealESRGAN_x2plus.pth"; + ip_adapter?: components["schemas"]["IPAdapterField"] | components["schemas"]["IPAdapterField"][] | null; /** - * Tile Size - * @description Tile size for tiled ESRGAN upscaling (0=tiling disabled) - * @default 400 + * T2I-Adapter + * @description T2I-Adapter(s) to apply + * @default null */ - tile_size?: number; + t2i_adapter?: components["schemas"]["T2IAdapterField"] | components["schemas"]["T2IAdapterField"][] | null; /** - * type - * @default esrgan - * @constant + * CFG Rescale Multiplier + * @description Rescale multiplier for CFG guidance, used for models trained with zero-terminal SNR + * @default 0 */ - type: "esrgan"; - }; - /** Edge */ - Edge: { - /** @description The connection for the edge's from node and field */ - source: components["schemas"]["EdgeConnection"]; - /** @description The connection for the edge's to node and field */ - destination: components["schemas"]["EdgeConnection"]; - }; - /** EdgeConnection */ - EdgeConnection: { + cfg_rescale_multiplier?: number; /** - * Node Id - * @description The id of the node for this edge connection + * @description Latents tensor + * @default null */ - node_id: string; + latents?: components["schemas"]["LatentsField"] | null; /** - * Field - * @description The field for this connection + * @description A mask of 
the region to apply the denoising process to. Values of 0.0 represent the regions to be fully denoised, and 1.0 represent the regions to be preserved. + * @default null */ - field: string; - }; - /** EnqueueBatchResult */ - EnqueueBatchResult: { + denoise_mask?: components["schemas"]["DenoiseMaskField"] | null; /** - * Queue Id - * @description The ID of the queue + * type + * @default denoise_latents_meta + * @constant */ - queue_id: string; + type: "denoise_latents_meta"; + }; + /** + * DenoiseMaskField + * @description An inpaint mask field + */ + DenoiseMaskField: { /** - * Enqueued - * @description The total number of queue items enqueued + * Mask Name + * @description The name of the mask image */ - enqueued: number; + mask_name: string; /** - * Requested - * @description The total number of queue items requested to be enqueued + * Masked Latents Name + * @description The name of the masked image latents + * @default null */ - requested: number; - /** @description The batch that was enqueued */ - batch: components["schemas"]["Batch"]; + masked_latents_name?: string | null; /** - * Priority - * @description The priority of the enqueued batch + * Gradient + * @description Used for gradient inpainting + * @default false */ - priority: number; + gradient?: boolean; + }; + /** + * DenoiseMaskOutput + * @description Base class for nodes that output a single image + */ + DenoiseMaskOutput: { + /** @description Mask for denoise model run */ + denoise_mask: components["schemas"]["DenoiseMaskField"]; /** - * Item Ids - * @description The IDs of the queue items that were enqueued + * type + * @default denoise_mask_output + * @constant */ - item_ids: number[]; + type: "denoise_mask_output"; }; /** - * Expand Mask with Fade - * @description Expands a mask with a fade effect. The mask uses black to indicate areas to keep from the generated image and white for areas to discard. - * The mask is thresholded to create a binary mask, and then a distance transform is applied to create a fade effect. - * The fade size is specified in pixels, and the mask is expanded by that amount. The result is a mask with a smooth transition from black to white. - * If the fade size is 0, the mask is returned as-is. + * Depth Anything Depth Estimation + * @description Generates a depth map using a Depth Anything model. 
*/ - ExpandMaskWithFadeInvocation: { + DepthAnythingDepthEstimationInvocation: { /** * @description The board to save the image to * @default null @@ -7128,41 +6917,29 @@ export type components = { */ use_cache?: boolean; /** - * @description The mask to expand + * @description The image to process * @default null */ - mask?: components["schemas"]["ImageField"] | null; - /** - * Threshold - * @description The threshold for the binary mask (0-255) - * @default 0 - */ - threshold?: number; + image?: components["schemas"]["ImageField"] | null; /** - * Fade Size Px - * @description The size of the fade in pixels - * @default 32 + * Model Size + * @description The size of the depth model to use + * @default small_v2 + * @enum {string} */ - fade_size_px?: number; + model_size?: "large" | "base" | "small" | "small_v2"; /** * type - * @default expand_mask_with_fade + * @default depth_anything_depth_estimation * @constant */ - type: "expand_mask_with_fade"; - }; - /** ExposedField */ - ExposedField: { - /** Nodeid */ - nodeId: string; - /** Fieldname */ - fieldName: string; + type: "depth_anything_depth_estimation"; }; /** - * Apply LoRA Collection - FLUX - * @description Applies a collection of LoRAs to a FLUX transformer. + * Divide Integers + * @description Divides two numbers */ - FLUXLoRACollectionLoader: { + DivideInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -7181,351 +6958,237 @@ export type components = { */ use_cache?: boolean; /** - * LoRAs - * @description LoRA models and weights. May be a single LoRA or collection. - * @default null + * A + * @description The first number + * @default 0 */ - loras?: components["schemas"]["LoRAField"] | components["schemas"]["LoRAField"][] | null; + a?: number; /** - * Transformer - * @description Transformer - * @default null - */ - transformer?: components["schemas"]["TransformerField"] | null; - /** - * CLIP - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null - */ - clip?: components["schemas"]["CLIPField"] | null; - /** - * T5 Encoder - * @description T5 tokenizer and text encoder - * @default null + * B + * @description The second number + * @default 0 */ - t5_encoder?: components["schemas"]["T5EncoderField"] | null; + b?: number; /** * type - * @default flux_lora_collection_loader + * @default div * @constant */ - type: "flux_lora_collection_loader"; + type: "div"; }; /** - * FaceIdentifier - * @description Outputs an image with detected face IDs printed on each face. For use with other FaceTools. + * DownloadCancelledEvent + * @description Event model for download_cancelled */ - FaceIdentifierInvocation: { + DownloadCancelledEvent: { /** - * @description The board to save the image to - * @default null + * Timestamp + * @description The timestamp of the event */ - board?: components["schemas"]["BoardField"] | null; + timestamp: number; /** - * @description Optional metadata to be saved with the image - * @default null + * Source + * @description The source of the download */ - metadata?: components["schemas"]["MetadataField"] | null; + source: string; + }; + /** + * DownloadCompleteEvent + * @description Event model for download_complete + */ + DownloadCompleteEvent: { /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
+ * Timestamp + * @description The timestamp of the event */ - id: string; + timestamp: number; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Source + * @description The source of the download */ - is_intermediate?: boolean; + source: string; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Download Path + * @description The local path where the download is saved */ - use_cache?: boolean; + download_path: string; /** - * @description Image to face detect - * @default null + * Total Bytes + * @description The total number of bytes downloaded */ - image?: components["schemas"]["ImageField"] | null; + total_bytes: number; + }; + /** + * DownloadErrorEvent + * @description Event model for download_error + */ + DownloadErrorEvent: { /** - * Minimum Confidence - * @description Minimum confidence for face detection (lower if detection is failing) - * @default 0.5 + * Timestamp + * @description The timestamp of the event */ - minimum_confidence?: number; + timestamp: number; /** - * Chunk - * @description Whether to bypass full image face detection and default to image chunking. Chunking will occur if no faces are found in the full image. - * @default false + * Source + * @description The source of the download */ - chunk?: boolean; + source: string; /** - * type - * @default face_identifier - * @constant + * Error Type + * @description The type of error */ - type: "face_identifier"; + error_type: string; + /** + * Error + * @description The error message + */ + error: string; }; /** - * FaceMask - * @description Face mask creation using mediapipe face detection + * DownloadJob + * @description Class to monitor and control a model download request. */ - FaceMaskInvocation: { - /** - * @description Optional metadata to be saved with the image - * @default null - */ - metadata?: components["schemas"]["MetadataField"] | null; + DownloadJob: { /** * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * @description Numeric ID of this job + * @default -1 */ - id: string; + id?: number; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Dest + * Format: path + * @description Initial destination of downloaded model on local disk; a directory or file path */ - is_intermediate?: boolean; + dest: string; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Download Path + * @description Final location of downloaded file or directory */ - use_cache?: boolean; + download_path?: string | null; /** - * @description Image to face detect - * @default null + * @description Status of the download + * @default waiting */ - image?: components["schemas"]["ImageField"] | null; + status?: components["schemas"]["DownloadJobStatus"]; /** - * Face Ids - * @description Comma-separated list of face ids to mask eg '0,2,7'. Numbered from 0. Leave empty to mask all. Find face IDs with FaceIdentifier node. 
- * @default + * Bytes + * @description Bytes downloaded so far + * @default 0 */ - face_ids?: string; + bytes?: number; /** - * Minimum Confidence - * @description Minimum confidence for face detection (lower if detection is failing) - * @default 0.5 + * Total Bytes + * @description Total file size (bytes) + * @default 0 */ - minimum_confidence?: number; + total_bytes?: number; /** - * X Offset - * @description Offset for the X-axis of the face mask - * @default 0 + * Error Type + * @description Name of exception that caused an error */ - x_offset?: number; + error_type?: string | null; /** - * Y Offset - * @description Offset for the Y-axis of the face mask - * @default 0 + * Error + * @description Traceback of the exception that caused an error */ - y_offset?: number; + error?: string | null; /** - * Chunk - * @description Whether to bypass full image face detection and default to image chunking. Chunking will occur if no faces are found in the full image. - * @default false + * Source + * Format: uri + * @description Where to download from. Specific types specified in child classes. */ - chunk?: boolean; + source: string; /** - * Invert Mask - * @description Toggle to invert the mask - * @default false + * Access Token + * @description authorization token for protected resources */ - invert_mask?: boolean; + access_token?: string | null; /** - * type - * @default face_mask_detection - * @constant + * Priority + * @description Queue priority; lower values are higher priority + * @default 10 */ - type: "face_mask_detection"; - }; - /** - * FaceMaskOutput - * @description Base class for FaceMask output - */ - FaceMaskOutput: { - /** @description The output image */ - image: components["schemas"]["ImageField"]; + priority?: number; /** - * Width - * @description The width of the image in pixels + * Job Started + * @description Timestamp for when the download job started */ - width: number; + job_started?: string | null; /** - * Height - * @description The height of the image in pixels + * Job Ended + * @description Timestamp for when the download job ended (completed or errored) */ - height: number; + job_ended?: string | null; /** - * type - * @default face_mask_output - * @constant + * Content Type + * @description Content type of downloaded file */ - type: "face_mask_output"; - /** @description The output mask */ - mask: components["schemas"]["ImageField"]; + content_type?: string | null; }; /** - * FaceOff - * @description Bound, extract, and mask a face from an image using MediaPipe detection + * DownloadJobStatus + * @description State of a download job. + * @enum {string} */ - FaceOffInvocation: { + DownloadJobStatus: "waiting" | "running" | "completed" | "cancelled" | "error"; + /** + * DownloadProgressEvent + * @description Event model for download_progress + */ + DownloadProgressEvent: { /** - * @description Optional metadata to be saved with the image - * @default null + * Timestamp + * @description The timestamp of the event */ - metadata?: components["schemas"]["MetadataField"] | null; + timestamp: number; /** - * Id - * @description The id of this instance of an invocation. + * Source + * @description The source of the download */ - id: string; + source: string; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation.
- * @default false + * Download Path + * @description The local path where the download is saved */ - is_intermediate?: boolean; + download_path: string; /** - * Use Cache - * @description Whether or not to use the cache - * @default true - */ - use_cache?: boolean; - /** - * @description Image for face detection - * @default null - */ - image?: components["schemas"]["ImageField"] | null; - /** - * Face Id - * @description The face ID to process, numbered from 0. Multiple faces not supported. Find a face's ID with FaceIdentifier node. - * @default 0 - */ - face_id?: number; - /** - * Minimum Confidence - * @description Minimum confidence for face detection (lower if detection is failing) - * @default 0.5 - */ - minimum_confidence?: number; - /** - * X Offset - * @description X-axis offset of the mask - * @default 0 - */ - x_offset?: number; - /** - * Y Offset - * @description Y-axis offset of the mask - * @default 0 - */ - y_offset?: number; - /** - * Padding - * @description All-axis padding around the mask in pixels - * @default 0 - */ - padding?: number; - /** - * Chunk - * @description Whether to bypass full image face detection and default to image chunking. Chunking will occur if no faces are found in the full image. - * @default false + * Current Bytes + * @description The number of bytes downloaded so far */ - chunk?: boolean; + current_bytes: number; /** - * type - * @default face_off - * @constant + * Total Bytes + * @description The total number of bytes to be downloaded */ - type: "face_off"; + total_bytes: number; }; /** - * FaceOffOutput - * @description Base class for FaceOff Output + * DownloadStartedEvent + * @description Event model for download_started */ - FaceOffOutput: { - /** @description The output image */ - image: components["schemas"]["ImageField"]; - /** - * Width - * @description The width of the image in pixels - */ - width: number; - /** - * Height - * @description The height of the image in pixels - */ - height: number; - /** - * type - * @default face_off_output - * @constant - */ - type: "face_off_output"; - /** @description The output mask */ - mask: components["schemas"]["ImageField"]; - /** - * X - * @description The x coordinate of the bounding box's left side - */ - x: number; - /** - * Y - * @description The y coordinate of the bounding box's top side - */ - y: number; - }; - /** FieldIdentifier */ - FieldIdentifier: { - /** - * Kind - * @description The kind of field - * @enum {string} - */ - kind: "input" | "output"; + DownloadStartedEvent: { /** - * Node Id - * @description The ID of the node + * Timestamp + * @description The timestamp of the event */ - node_id: string; + timestamp: number; /** - * Field Name - * @description The name of the field + * Source + * @description The source of the download */ - field_name: string; + source: string; /** - * User Label - * @description The user label of the field, if any + * Download Path + * @description The local path where the download is saved */ - user_label: string | null; + download_path: string; }; /** - * FieldKind - * @description The kind of field. - * - `Input`: An input field on a node. - * - `Output`: An output field on a node. - * - `Internal`: A field which is treated as an input, but cannot be used in node definitions. Metadata is - * one example. It is provided to nodes via the WithMetadata class, and we want to reserve the field name - * "metadata" for this on all nodes. `FieldKind` is used to short-circuit the field name validation logic, - * allowing "metadata" for that field. 
- * - `NodeAttribute`: The field is a node attribute. These are fields which are not inputs or outputs, - * but which are used to store information about the node. For example, the `id` and `type` fields are node - * attributes. - * - * The presence of this in `json_schema_extra["field_kind"]` is used when initializing node schemas on app - * startup, and when generating the OpenAPI schema for the workflow editor. - * @enum {string} - */ - FieldKind: "input" | "output" | "internal" | "node_attribute"; - /** - * Float Batch - * @description Create a batched generation, where the workflow is executed once for each float in the batch. + * Dynamic Prompt + * @description Parses a prompt using adieyal/dynamicprompts' random or combinatorial generator */ - FloatBatchInvocation: { + DynamicPromptInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -7540,34 +7203,56 @@ export type components = { /** * Use Cache * @description Whether or not to use the cache - * @default true + * @default false */ use_cache?: boolean; /** - * Batch Group - * @description The ID of this batch node's group. If provided, all batch nodes in with the same ID will be 'zipped' before execution, and all nodes' collections must be of the same size. - * @default None - * @enum {string} + * Prompt + * @description The prompt to parse with dynamicprompts + * @default null */ - batch_group_id?: "None" | "Group 1" | "Group 2" | "Group 3" | "Group 4" | "Group 5"; + prompt?: string | null; /** - * Floats - * @description The floats to batch over - * @default null + * Max Prompts + * @description The number of prompts to generate + * @default 1 */ - floats?: number[] | null; + max_prompts?: number; + /** + * Combinatorial + * @description Whether to use the combinatorial generator + * @default false + */ + combinatorial?: boolean; /** * type - * @default float_batch + * @default dynamic_prompt * @constant */ - type: "float_batch"; + type: "dynamic_prompt"; + }; + /** DynamicPromptsResponse */ + DynamicPromptsResponse: { + /** Prompts */ + prompts: string[]; + /** Error */ + error?: string | null; }; /** - * Float Collection Primitive - * @description A collection of float primitive values + * Upscale (RealESRGAN) + * @description Upscales an image using RealESRGAN. */ - FloatCollectionInvocation: { + ESRGANInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
@@ -7586,93 +7271,98 @@ export type components = { */ use_cache?: boolean; /** - * Collection - * @description The collection of float values - * @default [] + * @description The input image + * @default null */ - collection?: number[]; + image?: components["schemas"]["ImageField"] | null; /** - * type - * @default float_collection - * @constant + * Model Name + * @description The Real-ESRGAN model to use + * @default RealESRGAN_x4plus.pth + * @enum {string} */ - type: "float_collection"; - }; - /** - * FloatCollectionOutput - * @description Base class for nodes that output a collection of floats - */ - FloatCollectionOutput: { + model_name?: "RealESRGAN_x4plus.pth" | "RealESRGAN_x4plus_anime_6B.pth" | "ESRGAN_SRx4_DF2KOST_official-ff704c30.pth" | "RealESRGAN_x2plus.pth"; /** - * Collection - * @description The float collection + * Tile Size + * @description Tile size for tiled ESRGAN upscaling (0=tiling disabled) + * @default 400 */ - collection: number[]; + tile_size?: number; /** * type - * @default float_collection_output + * @default esrgan * @constant */ - type: "float_collection_output"; + type: "esrgan"; }; - /** - * Float Generator - * @description Generated a range of floats for use in a batched generation - */ - FloatGenerator: { + /** Edge */ + Edge: { + /** @description The connection for the edge's from node and field */ + source: components["schemas"]["EdgeConnection"]; + /** @description The connection for the edge's to node and field */ + destination: components["schemas"]["EdgeConnection"]; + }; + /** EdgeConnection */ + EdgeConnection: { /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Node Id + * @description The id of the node for this edge connection */ - id: string; + node_id: string; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Field + * @description The field for this connection */ - is_intermediate?: boolean; + field: string; + }; + /** EnqueueBatchResult */ + EnqueueBatchResult: { /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Queue Id + * @description The ID of the queue */ - use_cache?: boolean; + queue_id: string; /** - * Generator Type - * @description The float generator. + * Enqueued + * @description The total number of queue items enqueued */ - generator: components["schemas"]["FloatGeneratorField"]; + enqueued: number; /** - * type - * @default float_generator - * @constant + * Requested + * @description The total number of queue items requested to be enqueued */ - type: "float_generator"; - }; - /** FloatGeneratorField */ - FloatGeneratorField: Record; - /** - * FloatGeneratorOutput - * @description Base class for nodes that output a collection of floats - */ - FloatGeneratorOutput: { + requested: number; + /** @description The batch that was enqueued */ + batch: components["schemas"]["Batch"]; /** - * Floats - * @description The generated floats + * Priority + * @description The priority of the enqueued batch */ - floats: number[]; + priority: number; /** - * type - * @default float_generator_output - * @constant + * Item Ids + * @description The IDs of the queue items that were enqueued */ - type: "float_generator_output"; + item_ids: number[]; }; /** - * Float Primitive - * @description A float primitive value + * Expand Mask with Fade + * @description Expands a mask with a fade effect. 
The mask uses black to indicate areas to keep from the generated image and white for areas to discard. + * The mask is thresholded to create a binary mask, and then a distance transform is applied to create a fade effect. + * The fade size is specified in pixels, and the mask is expanded by that amount. The result is a mask with a smooth transition from black to white. + * If the fade size is 0, the mask is returned as-is. */ - FloatInvocation: { + ExpandMaskWithFadeInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -7691,23 +7381,41 @@ export type components = { */ use_cache?: boolean; /** - * Value - * @description The float value + * @description The mask to expand + * @default null + */ + mask?: components["schemas"]["ImageField"] | null; + /** + * Threshold + * @description The threshold for the binary mask (0-255) * @default 0 */ - value?: number; + threshold?: number; + /** + * Fade Size Px + * @description The size of the fade in pixels + * @default 32 + */ + fade_size_px?: number; /** * type - * @default float + * @default expand_mask_with_fade * @constant */ - type: "float"; + type: "expand_mask_with_fade"; + }; + /** ExposedField */ + ExposedField: { + /** Nodeid */ + nodeId: string; + /** Fieldname */ + fieldName: string; }; /** - * Float Range - * @description Creates a range + * Apply LoRA Collection - FLUX + * @description Applies a collection of LoRAs to a FLUX transformer. */ - FloatLinearRangeInvocation: { + FLUXLoRACollectionLoader: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -7726,100 +7434,127 @@ export type components = { */ use_cache?: boolean; /** - * Start - * @description The first value of the range - * @default 5 + * LoRAs + * @description LoRA models and weights. May be a single LoRA or collection. + * @default null */ - start?: number; + loras?: components["schemas"]["LoRAField"] | components["schemas"]["LoRAField"][] | null; /** - * Stop - * @description The last value of the range - * @default 10 + * Transformer + * @description Transformer + * @default null */ - stop?: number; + transformer?: components["schemas"]["TransformerField"] | null; /** - * Steps - * @description number of values to interpolate over (including start and stop) - * @default 30 + * CLIP + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null */ - steps?: number; + clip?: components["schemas"]["CLIPField"] | null; + /** + * T5 Encoder + * @description T5 tokenizer and text encoder + * @default null + */ + t5_encoder?: components["schemas"]["T5EncoderField"] | null; /** * type - * @default float_range + * @default flux_lora_collection_loader * @constant */ - type: "float_range"; + type: "flux_lora_collection_loader"; }; /** - * Float Math - * @description Performs floating point math. + * FLUXRedux_Checkpoint_Config + * @description Model config for FLUX Tools Redux model. */ - FloatMathInvocation: { + FLUXRedux_Checkpoint_Config: { /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Key + * @description A unique key for this model. 
*/ - id: string; + key: string; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Hash + * @description The hash of the model file(s). */ - is_intermediate?: boolean; + hash: string; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. */ - use_cache?: boolean; + path: string; /** - * Operation - * @description The operation to perform - * @default ADD - * @enum {string} + * File Size + * @description The size of the model in bytes. */ - operation?: "ADD" | "SUB" | "MUL" | "DIV" | "EXP" | "ABS" | "SQRT" | "MIN" | "MAX"; + file_size: number; /** - * A - * @description The first number - * @default 1 + * Name + * @description Name of the model. */ - a?: number; + name: string; /** - * B - * @description The second number - * @default 1 + * Description + * @description Model description */ - b?: number; + description: string | null; /** - * type - * @default float_math + * Source + * @description The original source of the model (path, URL or repo_id). + */ + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; + /** + * Source Api Response + * @description The original API response from the source, as stringified JSON. + */ + source_api_response: string | null; + /** + * Cover Image + * @description Url for image to preview model + */ + cover_image: string | null; + /** + * Usage Info + * @description Usage information for this model + */ + usage_info: string | null; + /** + * Type + * @default flux_redux * @constant */ - type: "float_math"; - }; - /** - * FloatOutput - * @description Base class for nodes that output a single float - */ - FloatOutput: { + type: "flux_redux"; /** - * Value - * @description The output float + * Format + * @default checkpoint + * @constant */ - value: number; + format: "checkpoint"; /** - * type - * @default float_output + * Base + * @default flux * @constant */ - type: "float_output"; + base: "flux"; }; /** - * Float To Integer - * @description Rounds a float number to (a multiple of) an integer. + * FaceIdentifier + * @description Outputs an image with detected face IDs printed on each face. For use with other FaceTools. */ - FloatToIntegerInvocation: { + FaceIdentifierInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
@@ -7838,83 +7573,39 @@ export type components = { */ use_cache?: boolean; /** - * Value - * @description The value to round - * @default 0 + * @description Image to face detect + * @default null */ - value?: number; + image?: components["schemas"]["ImageField"] | null; /** - * Multiple of - * @description The multiple to round to - * @default 1 + * Minimum Confidence + * @description Minimum confidence for face detection (lower if detection is failing) + * @default 0.5 */ - multiple?: number; + minimum_confidence?: number; /** - * Method - * @description The method to use for rounding - * @default Nearest - * @enum {string} + * Chunk + * @description Whether to bypass full image face detection and default to image chunking. Chunking will occur if no faces are found in the full image. + * @default false */ - method?: "Nearest" | "Floor" | "Ceiling" | "Truncate"; + chunk?: boolean; /** * type - * @default float_to_int - * @constant - */ - type: "float_to_int"; - }; - /** - * FluxConditioningCollectionOutput - * @description Base class for nodes that output a collection of conditioning tensors - */ - FluxConditioningCollectionOutput: { - /** - * Collection - * @description The output conditioning tensors - */ - collection: components["schemas"]["FluxConditioningField"][]; - /** - * type - * @default flux_conditioning_collection_output + * @default face_identifier * @constant */ - type: "flux_conditioning_collection_output"; + type: "face_identifier"; }; /** - * FluxConditioningField - * @description A conditioning tensor primitive value + * FaceMask + * @description Face mask creation using mediapipe face detection */ - FluxConditioningField: { - /** - * Conditioning Name - * @description The name of conditioning tensor - */ - conditioning_name: string; + FaceMaskInvocation: { /** - * @description The mask associated with this conditioning tensor. Excluded regions should be set to False, included regions should be set to True. + * @description Optional metadata to be saved with the image * @default null */ - mask?: components["schemas"]["TensorField"] | null; - }; - /** - * FluxConditioningOutput - * @description Base class for nodes that output a single conditioning tensor - */ - FluxConditioningOutput: { - /** @description Conditioning tensor */ - conditioning: components["schemas"]["FluxConditioningField"]; - /** - * type - * @default flux_conditioning_output - * @constant - */ - type: "flux_conditioning_output"; - }; - /** - * Control LoRA - FLUX - * @description LoRA model and Image to use with FLUX transformer generation. - */ - FluxControlLoRALoaderInvocation: { + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -7933,90 +7624,89 @@ export type components = { */ use_cache?: boolean; /** - * Control LoRA - * @description Control LoRA model to load + * @description Image to face detect * @default null */ - lora?: components["schemas"]["ModelIdentifierField"] | null; + image?: components["schemas"]["ImageField"] | null; /** - * @description The image to encode. - * @default null + * Face Ids + * @description Comma-separated list of face ids to mask eg '0,2,7'. Numbered from 0. Leave empty to mask all. Find face IDs with FaceIdentifier node. + * @default */ - image?: components["schemas"]["ImageField"] | null; + face_ids?: string; /** - * Weight - * @description The weight of the LoRA. 
- * @default 1 + * Minimum Confidence + * @description Minimum confidence for face detection (lower if detection is failing) + * @default 0.5 */ - weight?: number; + minimum_confidence?: number; /** - * type - * @default flux_control_lora_loader - * @constant + * X Offset + * @description Offset for the X-axis of the face mask + * @default 0 */ - type: "flux_control_lora_loader"; - }; - /** - * FluxControlLoRALoaderOutput - * @description Flux Control LoRA Loader Output - */ - FluxControlLoRALoaderOutput: { + x_offset?: number; /** - * Flux Control LoRA - * @description Control LoRAs to apply on model loading - * @default null + * Y Offset + * @description Offset for the Y-axis of the face mask + * @default 0 */ - control_lora: components["schemas"]["ControlLoRAField"]; + y_offset?: number; /** - * type - * @default flux_control_lora_loader_output - * @constant + * Chunk + * @description Whether to bypass full image face detection and default to image chunking. Chunking will occur if no faces are found in the full image. + * @default false */ - type: "flux_control_lora_loader_output"; - }; - /** FluxControlNetField */ - FluxControlNetField: { - /** @description The control image */ - image: components["schemas"]["ImageField"]; - /** @description The ControlNet model to use */ - control_model: components["schemas"]["ModelIdentifierField"]; + chunk?: boolean; /** - * Control Weight - * @description The weight given to the ControlNet - * @default 1 + * Invert Mask + * @description Toggle to invert the mask + * @default false */ - control_weight?: number | number[]; + invert_mask?: boolean; /** - * Begin Step Percent - * @description When the ControlNet is first applied (% of total steps) - * @default 0 + * type + * @default face_mask_detection + * @constant */ - begin_step_percent?: number; + type: "face_mask_detection"; + }; + /** + * FaceMaskOutput + * @description Base class for FaceMask output + */ + FaceMaskOutput: { + /** @description The output image */ + image: components["schemas"]["ImageField"]; /** - * End Step Percent - * @description When the ControlNet is last applied (% of total steps) - * @default 1 + * Width + * @description The width of the image in pixels */ - end_step_percent?: number; + width: number; /** - * Resize Mode - * @description The resize mode to use - * @default just_resize - * @enum {string} + * Height + * @description The height of the image in pixels */ - resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple"; + height: number; /** - * Instantx Control Mode - * @description The control mode for InstantX ControlNet union models. Ignored for other ControlNet models. The standard mapping is: canny (0), tile (1), depth (2), blur (3), pose (4), gray (5), low quality (6). Negative values will be treated as 'None'. - * @default -1 + * type + * @default face_mask_output + * @constant */ - instantx_control_mode?: number | null; + type: "face_mask_output"; + /** @description The output mask */ + mask: components["schemas"]["ImageField"]; }; /** - * FLUX ControlNet - * @description Collect FLUX ControlNet info to pass to other nodes. + * FaceOff + * @description Bound, extract, and mask a face from an image using MediaPipe detection */ - FluxControlNetInvocation: { + FaceOffInvocation: { + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. 
Must be unique among all instances of invocations. @@ -8035,240 +7725,230 @@ export type components = { */ use_cache?: boolean; /** - * @description The control image + * @description Image for face detection * @default null */ image?: components["schemas"]["ImageField"] | null; /** - * @description ControlNet model to load - * @default null + * Face Id + * @description The face ID to process, numbered from 0. Multiple faces not supported. Find a face's ID with FaceIdentifier node. + * @default 0 */ - control_model?: components["schemas"]["ModelIdentifierField"] | null; + face_id?: number; /** - * Control Weight - * @description The weight given to the ControlNet - * @default 1 + * Minimum Confidence + * @description Minimum confidence for face detection (lower if detection is failing) + * @default 0.5 */ - control_weight?: number | number[]; + minimum_confidence?: number; /** - * Begin Step Percent - * @description When the ControlNet is first applied (% of total steps) + * X Offset + * @description X-axis offset of the mask * @default 0 */ - begin_step_percent?: number; + x_offset?: number; /** - * End Step Percent - * @description When the ControlNet is last applied (% of total steps) - * @default 1 + * Y Offset + * @description Y-axis offset of the mask + * @default 0 */ - end_step_percent?: number; + y_offset?: number; /** - * Resize Mode - * @description The resize mode used - * @default just_resize - * @enum {string} + * Padding + * @description All-axis padding around the mask in pixels + * @default 0 */ - resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple"; + padding?: number; /** - * Instantx Control Mode - * @description The control mode for InstantX ControlNet union models. Ignored for other ControlNet models. The standard mapping is: canny (0), tile (1), depth (2), blur (3), pose (4), gray (5), low quality (6). Negative values will be treated as 'None'. - * @default -1 + * Chunk + * @description Whether to bypass full image face detection and default to image chunking. Chunking will occur if no faces are found in the full image. + * @default false */ - instantx_control_mode?: number | null; + chunk?: boolean; /** * type - * @default flux_controlnet + * @default face_off * @constant */ - type: "flux_controlnet"; + type: "face_off"; }; /** - * FluxControlNetOutput - * @description FLUX ControlNet info + * FaceOffOutput + * @description Base class for FaceOff Output */ - FluxControlNetOutput: { - /** @description ControlNet(s) to apply */ - control: components["schemas"]["FluxControlNetField"]; + FaceOffOutput: { + /** @description The output image */ + image: components["schemas"]["ImageField"]; /** - * type - * @default flux_controlnet_output - * @constant + * Width + * @description The width of the image in pixels */ - type: "flux_controlnet_output"; - }; - /** - * FLUX Denoise - * @description Run denoising process with a FLUX transformer model. - */ - FluxDenoiseInvocation: { + width: number; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Height + * @description The height of the image in pixels */ - id: string; + height: number; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. 
- * @default false - */ - is_intermediate?: boolean; - /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * type + * @default face_off_output + * @constant */ - use_cache?: boolean; + type: "face_off_output"; + /** @description The output mask */ + mask: components["schemas"]["ImageField"]; /** - * @description Latents tensor - * @default null + * X + * @description The x coordinate of the bounding box's left side */ - latents?: components["schemas"]["LatentsField"] | null; + x: number; /** - * @description A mask of the region to apply the denoising process to. Values of 0.0 represent the regions to be fully denoised, and 1.0 represent the regions to be preserved. - * @default null + * Y + * @description The y coordinate of the bounding box's top side */ - denoise_mask?: components["schemas"]["DenoiseMaskField"] | null; + y: number; + }; + /** FieldIdentifier */ + FieldIdentifier: { /** - * Denoising Start - * @description When to start denoising, expressed a percentage of total steps - * @default 0 + * Kind + * @description The kind of field + * @enum {string} */ - denoising_start?: number; + kind: "input" | "output"; /** - * Denoising End - * @description When to stop denoising, expressed a percentage of total steps - * @default 1 + * Node Id + * @description The ID of the node */ - denoising_end?: number; + node_id: string; /** - * Add Noise - * @description Add noise based on denoising start. - * @default true + * Field Name + * @description The name of the field */ - add_noise?: boolean; + field_name: string; /** - * Transformer - * @description Flux model (Transformer) to load - * @default null + * User Label + * @description The user label of the field, if any */ - transformer?: components["schemas"]["TransformerField"] | null; + user_label: string | null; + }; + /** + * FieldKind + * @description The kind of field. + * - `Input`: An input field on a node. + * - `Output`: An output field on a node. + * - `Internal`: A field which is treated as an input, but cannot be used in node definitions. Metadata is + * one example. It is provided to nodes via the WithMetadata class, and we want to reserve the field name + * "metadata" for this on all nodes. `FieldKind` is used to short-circuit the field name validation logic, + * allowing "metadata" for that field. + * - `NodeAttribute`: The field is a node attribute. These are fields which are not inputs or outputs, + * but which are used to store information about the node. For example, the `id` and `type` fields are node + * attributes. + * + * The presence of this in `json_schema_extra["field_kind"]` is used when initializing node schemas on app + * startup, and when generating the OpenAPI schema for the workflow editor. + * @enum {string} + */ + FieldKind: "input" | "output" | "internal" | "node_attribute"; + /** + * Float Batch + * @description Create a batched generation, where the workflow is executed once for each float in the batch. + */ + FloatBatchInvocation: { /** - * Control LoRA - * @description Control LoRA model to load - * @default null + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - control_lora?: components["schemas"]["ControlLoRAField"] | null; + id: string; /** - * Positive Text Conditioning - * @description Positive conditioning tensor - * @default null + * Is Intermediate + * @description Whether or not this is an intermediate invocation. 
+ * @default false */ - positive_text_conditioning?: components["schemas"]["FluxConditioningField"] | components["schemas"]["FluxConditioningField"][] | null; + is_intermediate?: boolean; /** - * Negative Text Conditioning - * @description Negative conditioning tensor. Can be None if cfg_scale is 1.0. - * @default null + * Use Cache + * @description Whether or not to use the cache + * @default true */ - negative_text_conditioning?: components["schemas"]["FluxConditioningField"] | components["schemas"]["FluxConditioningField"][] | null; + use_cache?: boolean; /** - * Redux Conditioning - * @description FLUX Redux conditioning tensor. - * @default null + * Batch Group + * @description The ID of this batch node's group. If provided, all batch nodes with the same ID will be 'zipped' before execution, and all nodes' collections must be of the same size. + * @default None + * @enum {string} */ - redux_conditioning?: components["schemas"]["FluxReduxConditioningField"] | components["schemas"]["FluxReduxConditioningField"][] | null; + batch_group_id?: "None" | "Group 1" | "Group 2" | "Group 3" | "Group 4" | "Group 5"; /** - * @description FLUX Fill conditioning. + * Floats + * @description The floats to batch over * @default null */ - fill_conditioning?: components["schemas"]["FluxFillConditioningField"] | null; - /** - * CFG Scale - * @description Classifier-Free Guidance scale - * @default 1 - */ - cfg_scale?: number | number[]; - /** - * CFG Scale Start Step - * @description Index of the first step to apply cfg_scale. Negative indices count backwards from the the last step (e.g. a value of -1 refers to the final step). - * @default 0 - */ - cfg_scale_start_step?: number; - /** - * CFG Scale End Step - * @description Index of the last step to apply cfg_scale. Negative indices count backwards from the last step (e.g. a value of -1 refers to the final step). - * @default -1 - */ - cfg_scale_end_step?: number; - /** - * Width - * @description Width of the generated image. - * @default 1024 - */ - width?: number; - /** - * Height - * @description Height of the generated image. - * @default 1024 - */ - height?: number; + floats?: number[] | null; /** - * Num Steps - * @description Number of diffusion steps. Recommended values are schnell: 4, dev: 50. - * @default 4 + * type + * @default float_batch + * @constant */ - num_steps?: number; + type: "float_batch"; + }; + /** + * Float Collection Primitive + * @description A collection of float primitive values + */ + FloatCollectionInvocation: { /** - * Guidance - * @description The guidance strength. Higher values adhere more strictly to the prompt, and will produce less diverse images. FLUX dev only, ignored for schnell. - * @default 4 + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - guidance?: number; + id: string; /** - * Seed - * @description Randomness seed for reproducibility. - * @default 0 + * Is Intermediate + * @description Whether or not this is an intermediate invocation. * @default false */ - seed?: number; + is_intermediate?: boolean; /** - * Control - * @description ControlNet models.
- * @default null + * Use Cache + * @description Whether or not to use the cache + * @default true */ - control?: components["schemas"]["FluxControlNetField"] | components["schemas"]["FluxControlNetField"][] | null; + use_cache?: boolean; /** - * @description VAE - * @default null + * Collection + * @description The collection of float values + * @default [] */ - controlnet_vae?: components["schemas"]["VAEField"] | null; + collection?: number[]; /** - * IP-Adapter - * @description IP-Adapter to apply - * @default null + * type + * @default float_collection + * @constant */ - ip_adapter?: components["schemas"]["IPAdapterField"] | components["schemas"]["IPAdapterField"][] | null; + type: "float_collection"; + }; + /** + * FloatCollectionOutput + * @description Base class for nodes that output a collection of floats + */ + FloatCollectionOutput: { /** - * Kontext Conditioning - * @description FLUX Kontext conditioning (reference image). - * @default null + * Collection + * @description The float collection */ - kontext_conditioning?: components["schemas"]["FluxKontextConditioningField"] | components["schemas"]["FluxKontextConditioningField"][] | null; + collection: number[]; /** * type - * @default flux_denoise + * @default float_collection_output * @constant */ - type: "flux_denoise"; + type: "float_collection_output"; }; /** - * FLUX Denoise + Metadata - * @description Run denoising process with a FLUX transformer model + metadata. + * Float Generator + * @description Generates a range of floats for use in a batched generation */ - FluxDenoiseLatentsMetaInvocation: { - /** - * @description Optional metadata to be saved with the image - * @default null - */ - metadata?: components["schemas"]["MetadataField"] | null; + FloatGenerator: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -8287,161 +7967,123 @@ */ use_cache?: boolean; /** - * @description Latents tensor - * @default null + * Generator Type + * @description The float generator. */ - latents?: components["schemas"]["LatentsField"] | null; + generator: components["schemas"]["FloatGeneratorField"]; /** - * @description A mask of the region to apply the denoising process to. Values of 0.0 represent the regions to be fully denoised, and 1.0 represent the regions to be preserved. - * @default null + * type + * @default float_generator + * @constant */ - denoise_mask?: components["schemas"]["DenoiseMaskField"] | null; + type: "float_generator"; + }; + /** FloatGeneratorField */ + FloatGeneratorField: Record<string, never>; + /** + * FloatGeneratorOutput + * @description Base class for nodes that output a collection of floats + */ + FloatGeneratorOutput: { /** - * Denoising Start - * @description When to start denoising, expressed a percentage of total steps - * @default 0 + * Floats + * @description The generated floats */ - denoising_start?: number; + floats: number[]; /** - * Denoising End - * @description When to stop denoising, expressed a percentage of total steps - * @default 1 - */ - denoising_end?: number; - /** - * Add Noise - * @description Add noise based on denoising start.
- * @default true - */ - add_noise?: boolean; - /** - * Transformer - * @description Flux model (Transformer) to load - * @default null - */ - transformer?: components["schemas"]["TransformerField"] | null; - /** - * Control LoRA - * @description Control LoRA model to load - * @default null - */ - control_lora?: components["schemas"]["ControlLoRAField"] | null; - /** - * Positive Text Conditioning - * @description Positive conditioning tensor - * @default null - */ - positive_text_conditioning?: components["schemas"]["FluxConditioningField"] | components["schemas"]["FluxConditioningField"][] | null; - /** - * Negative Text Conditioning - * @description Negative conditioning tensor. Can be None if cfg_scale is 1.0. - * @default null + * type + * @default float_generator_output + * @constant */ - negative_text_conditioning?: components["schemas"]["FluxConditioningField"] | components["schemas"]["FluxConditioningField"][] | null; + type: "float_generator_output"; + }; + /** + * Float Primitive + * @description A float primitive value + */ + FloatInvocation: { /** - * Redux Conditioning - * @description FLUX Redux conditioning tensor. - * @default null + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - redux_conditioning?: components["schemas"]["FluxReduxConditioningField"] | components["schemas"]["FluxReduxConditioningField"][] | null; + id: string; /** - * @description FLUX Fill conditioning. - * @default null + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - fill_conditioning?: components["schemas"]["FluxFillConditioningField"] | null; + is_intermediate?: boolean; /** - * CFG Scale - * @description Classifier-Free Guidance scale - * @default 1 + * Use Cache + * @description Whether or not to use the cache + * @default true */ - cfg_scale?: number | number[]; + use_cache?: boolean; /** - * CFG Scale Start Step - * @description Index of the first step to apply cfg_scale. Negative indices count backwards from the the last step (e.g. a value of -1 refers to the final step). + * Value + * @description The float value * @default 0 */ - cfg_scale_start_step?: number; - /** - * CFG Scale End Step - * @description Index of the last step to apply cfg_scale. Negative indices count backwards from the last step (e.g. a value of -1 refers to the final step). - * @default -1 - */ - cfg_scale_end_step?: number; - /** - * Width - * @description Width of the generated image. - * @default 1024 - */ - width?: number; - /** - * Height - * @description Height of the generated image. - * @default 1024 - */ - height?: number; + value?: number; /** - * Num Steps - * @description Number of diffusion steps. Recommended values are schnell: 4, dev: 50. - * @default 4 + * type + * @default float + * @constant */ - num_steps?: number; + type: "float"; + }; + /** + * Float Range + * @description Creates a range + */ + FloatLinearRangeInvocation: { /** - * Guidance - * @description The guidance strength. Higher values adhere more strictly to the prompt, and will produce less diverse images. FLUX dev only, ignored for schnell. - * @default 4 + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - guidance?: number; + id: string; /** - * Seed - * @description Randomness seed for reproducibility. - * @default 0 + * Is Intermediate + * @description Whether or not this is an intermediate invocation. 
+ * @default false */ - seed?: number; + is_intermediate?: boolean; /** - * Control - * @description ControlNet models. - * @default null + * Use Cache + * @description Whether or not to use the cache + * @default true */ - control?: components["schemas"]["FluxControlNetField"] | components["schemas"]["FluxControlNetField"][] | null; + use_cache?: boolean; /** - * @description VAE - * @default null + * Start + * @description The first value of the range + * @default 5 */ - controlnet_vae?: components["schemas"]["VAEField"] | null; + start?: number; /** - * IP-Adapter - * @description IP-Adapter to apply - * @default null + * Stop + * @description The last value of the range + * @default 10 */ - ip_adapter?: components["schemas"]["IPAdapterField"] | components["schemas"]["IPAdapterField"][] | null; + stop?: number; /** - * Kontext Conditioning - * @description FLUX Kontext conditioning (reference image). - * @default null + * Steps + * @description number of values to interpolate over (including start and stop) + * @default 30 */ - kontext_conditioning?: components["schemas"]["FluxKontextConditioningField"] | components["schemas"]["FluxKontextConditioningField"][] | null; + steps?: number; /** * type - * @default flux_denoise_meta + * @default float_range * @constant */ - type: "flux_denoise_meta"; - }; - /** - * FluxFillConditioningField - * @description A FLUX Fill conditioning field. - */ - FluxFillConditioningField: { - /** @description The FLUX Fill reference image. */ - image: components["schemas"]["ImageField"]; - /** @description The FLUX Fill inpaint mask. */ - mask: components["schemas"]["TensorField"]; + type: "float_range"; }; /** - * FLUX Fill Conditioning - * @description Prepare the FLUX Fill conditioning data. + * Float Math + * @description Performs floating point math. */ - FluxFillInvocation: { + FloatMathInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -8460,44 +8102,53 @@ export type components = { */ use_cache?: boolean; /** - * @description The FLUX Fill reference image. - * @default null + * Operation + * @description The operation to perform + * @default ADD + * @enum {string} */ - image?: components["schemas"]["ImageField"] | null; + operation?: "ADD" | "SUB" | "MUL" | "DIV" | "EXP" | "ABS" | "SQRT" | "MIN" | "MAX"; /** - * @description The bool inpainting mask. Excluded regions should be set to False, included regions should be set to True. - * @default null + * A + * @description The first number + * @default 1 */ - mask?: components["schemas"]["TensorField"] | null; + a?: number; + /** + * B + * @description The second number + * @default 1 + */ + b?: number; /** * type - * @default flux_fill + * @default float_math * @constant */ - type: "flux_fill"; + type: "float_math"; }; /** - * FluxFillOutput - * @description The conditioning output of a FLUX Fill invocation. + * FloatOutput + * @description Base class for nodes that output a single float */ - FluxFillOutput: { + FloatOutput: { /** - * Conditioning - * @description FLUX Redux conditioning tensor + * Value + * @description The output float */ - fill_cond: components["schemas"]["FluxFillConditioningField"]; + value: number; /** * type - * @default flux_fill_output + * @default float_output * @constant */ - type: "flux_fill_output"; + type: "float_output"; }; /** - * FLUX IP-Adapter - * @description Collects FLUX IP-Adapter info to pass to other nodes. 
+ * Float To Integer + * @description Rounds a float number to (a multiple of) an integer. */ - FluxIPAdapterInvocation: { + FloatToIntegerInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -8516,64 +8167,83 @@ export type components = { */ use_cache?: boolean; /** - * @description The IP-Adapter image prompt(s). - * @default null + * Value + * @description The value to round + * @default 0 */ - image?: components["schemas"]["ImageField"] | null; + value?: number; /** - * IP-Adapter Model - * @description The IP-Adapter model. - * @default null + * Multiple of + * @description The multiple to round to + * @default 1 */ - ip_adapter_model?: components["schemas"]["ModelIdentifierField"] | null; + multiple?: number; /** - * Clip Vision Model - * @description CLIP Vision model to use. - * @default ViT-L - * @constant + * Method + * @description The method to use for rounding + * @default Nearest + * @enum {string} */ - clip_vision_model?: "ViT-L"; + method?: "Nearest" | "Floor" | "Ceiling" | "Truncate"; /** - * Weight - * @description The weight given to the IP-Adapter - * @default 1 + * type + * @default float_to_int + * @constant */ - weight?: number | number[]; + type: "float_to_int"; + }; + /** + * FluxConditioningCollectionOutput + * @description Base class for nodes that output a collection of conditioning tensors + */ + FluxConditioningCollectionOutput: { /** - * Begin Step Percent - * @description When the IP-Adapter is first applied (% of total steps) - * @default 0 - */ - begin_step_percent?: number; - /** - * End Step Percent - * @description When the IP-Adapter is last applied (% of total steps) - * @default 1 + * Collection + * @description The output conditioning tensors */ - end_step_percent?: number; + collection: components["schemas"]["FluxConditioningField"][]; /** * type - * @default flux_ip_adapter + * @default flux_conditioning_collection_output * @constant */ - type: "flux_ip_adapter"; + type: "flux_conditioning_collection_output"; }; /** - * FLUX Kontext Image Prep - * @description Prepares an image or images for use with FLUX Kontext. The first/single image is resized to the nearest - * preferred Kontext resolution. All other images are concatenated horizontally, maintaining their aspect ratio. + * FluxConditioningField + * @description A conditioning tensor primitive value */ - FluxKontextConcatenateImagesInvocation: { + FluxConditioningField: { /** - * @description The board to save the image to - * @default null + * Conditioning Name + * @description The name of conditioning tensor */ - board?: components["schemas"]["BoardField"] | null; + conditioning_name: string; /** - * @description Optional metadata to be saved with the image + * @description The mask associated with this conditioning tensor. Excluded regions should be set to False, included regions should be set to True. * @default null */ - metadata?: components["schemas"]["MetadataField"] | null; + mask?: components["schemas"]["TensorField"] | null; + }; + /** + * FluxConditioningOutput + * @description Base class for nodes that output a single conditioning tensor + */ + FluxConditioningOutput: { + /** @description Conditioning tensor */ + conditioning: components["schemas"]["FluxConditioningField"]; + /** + * type + * @default flux_conditioning_output + * @constant + */ + type: "flux_conditioning_output"; + }; + /** + * Control LoRA - FLUX + * @description LoRA model and Image to use with FLUX transformer generation. 
+ */ + FluxControlLoRALoaderInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -8592,88 +8262,90 @@ export type components = { */ use_cache?: boolean; /** - * Images - * @description The images to concatenate + * Control LoRA + * @description Control LoRA model to load * @default null */ - images?: components["schemas"]["ImageField"][] | null; + lora?: components["schemas"]["ModelIdentifierField"] | null; /** - * Use Preferred Resolution - * @description Use FLUX preferred resolutions for the first image - * @default true + * @description The image to encode. + * @default null */ - use_preferred_resolution?: boolean; + image?: components["schemas"]["ImageField"] | null; + /** + * Weight + * @description The weight of the LoRA. + * @default 1 + */ + weight?: number; /** * type - * @default flux_kontext_image_prep + * @default flux_control_lora_loader * @constant */ - type: "flux_kontext_image_prep"; - }; - /** - * FluxKontextConditioningField - * @description A conditioning field for FLUX Kontext (reference image). - */ - FluxKontextConditioningField: { - /** @description The Kontext reference image. */ - image: components["schemas"]["ImageField"]; + type: "flux_control_lora_loader"; }; /** - * Kontext Conditioning - FLUX - * @description Prepares a reference image for FLUX Kontext conditioning. + * FluxControlLoRALoaderOutput + * @description Flux Control LoRA Loader Output */ - FluxKontextInvocation: { + FluxControlLoRALoaderOutput: { /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Flux Control LoRA + * @description Control LoRAs to apply on model loading + * @default null */ - id: string; + control_lora: components["schemas"]["ControlLoRAField"]; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * type + * @default flux_control_lora_loader_output + * @constant */ - is_intermediate?: boolean; + type: "flux_control_lora_loader_output"; + }; + /** FluxControlNetField */ + FluxControlNetField: { + /** @description The control image */ + image: components["schemas"]["ImageField"]; + /** @description The ControlNet model to use */ + control_model: components["schemas"]["ModelIdentifierField"]; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Control Weight + * @description The weight given to the ControlNet + * @default 1 */ - use_cache?: boolean; + control_weight?: number | number[]; /** - * @description The Kontext reference image. - * @default null + * Begin Step Percent + * @description When the ControlNet is first applied (% of total steps) + * @default 0 */ - image?: components["schemas"]["ImageField"] | null; + begin_step_percent?: number; /** - * type - * @default flux_kontext - * @constant + * End Step Percent + * @description When the ControlNet is last applied (% of total steps) + * @default 1 */ - type: "flux_kontext"; - }; - /** - * FluxKontextOutput - * @description The conditioning output of a FLUX Kontext invocation. 
- */ - FluxKontextOutput: { + end_step_percent?: number; /** - * Kontext Conditioning - * @description FLUX Kontext conditioning (reference image) + * Resize Mode + * @description The resize mode to use + * @default just_resize + * @enum {string} */ - kontext_cond: components["schemas"]["FluxKontextConditioningField"]; + resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple"; /** - * type - * @default flux_kontext_output - * @constant + * Instantx Control Mode + * @description The control mode for InstantX ControlNet union models. Ignored for other ControlNet models. The standard mapping is: canny (0), tile (1), depth (2), blur (3), pose (4), gray (5), low quality (6). Negative values will be treated as 'None'. + * @default -1 */ - type: "flux_kontext_output"; + instantx_control_mode?: number | null; }; /** - * Apply LoRA - FLUX - * @description Apply a LoRA model to a FLUX transformer and/or text encoder. + * FLUX ControlNet + * @description Collect FLUX ControlNet info to pass to other nodes. */ - FluxLoRALoaderInvocation: { + FluxControlNetInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -8692,77 +8364,72 @@ export type components = { */ use_cache?: boolean; /** - * LoRA - * @description LoRA model to load + * @description The control image * @default null */ - lora?: components["schemas"]["ModelIdentifierField"] | null; - /** - * Weight - * @description The weight at which the LoRA is applied to each model - * @default 0.75 - */ - weight?: number; + image?: components["schemas"]["ImageField"] | null; /** - * FLUX Transformer - * @description Transformer + * @description ControlNet model to load * @default null */ - transformer?: components["schemas"]["TransformerField"] | null; + control_model?: components["schemas"]["ModelIdentifierField"] | null; /** - * CLIP - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null + * Control Weight + * @description The weight given to the ControlNet + * @default 1 */ - clip?: components["schemas"]["CLIPField"] | null; + control_weight?: number | number[]; /** - * T5 Encoder - * @description T5 tokenizer and text encoder - * @default null + * Begin Step Percent + * @description When the ControlNet is first applied (% of total steps) + * @default 0 */ - t5_encoder?: components["schemas"]["T5EncoderField"] | null; + begin_step_percent?: number; /** - * type - * @default flux_lora_loader - * @constant + * End Step Percent + * @description When the ControlNet is last applied (% of total steps) + * @default 1 */ - type: "flux_lora_loader"; - }; - /** - * FluxLoRALoaderOutput - * @description FLUX LoRA Loader Output - */ - FluxLoRALoaderOutput: { + end_step_percent?: number; /** - * FLUX Transformer - * @description Transformer - * @default null + * Resize Mode + * @description The resize mode used + * @default just_resize + * @enum {string} */ - transformer: components["schemas"]["TransformerField"] | null; + resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple"; /** - * CLIP - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null + * Instantx Control Mode + * @description The control mode for InstantX ControlNet union models. Ignored for other ControlNet models. The standard mapping is: canny (0), tile (1), depth (2), blur (3), pose (4), gray (5), low quality (6). Negative values will be treated as 'None'. 
+ * @default -1 */ - clip: components["schemas"]["CLIPField"] | null; + instantx_control_mode?: number | null; /** - * T5 Encoder - * @description T5 tokenizer and text encoder - * @default null + * type + * @default flux_controlnet + * @constant */ - t5_encoder: components["schemas"]["T5EncoderField"] | null; + type: "flux_controlnet"; + }; + /** + * FluxControlNetOutput + * @description FLUX ControlNet info + */ + FluxControlNetOutput: { + /** @description ControlNet(s) to apply */ + control: components["schemas"]["FluxControlNetField"]; /** * type - * @default flux_lora_loader_output + * @default flux_controlnet_output * @constant */ - type: "flux_lora_loader_output"; + type: "flux_controlnet_output"; }; /** - * Main Model - FLUX - * @description Loads a flux base model, outputting its submodels. + * FLUX Denoise + * @description Run denoising process with a FLUX transformer model. */ - FluxModelLoaderInvocation: { + FluxDenoiseInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -8780,166 +8447,157 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description Flux model (Transformer) to load */ - model: components["schemas"]["ModelIdentifierField"]; /** - * T5 Encoder - * @description T5 tokenizer and text encoder + * @description Latents tensor + * @default null */ - t5_encoder_model: components["schemas"]["ModelIdentifierField"]; + latents?: components["schemas"]["LatentsField"] | null; /** - * CLIP Embed - * @description CLIP Embed loader + * @description A mask of the region to apply the denoising process to. Values of 0.0 represent the regions to be fully denoised, and 1.0 represent the regions to be preserved. + * @default null */ - clip_embed_model: components["schemas"]["ModelIdentifierField"]; + denoise_mask?: components["schemas"]["DenoiseMaskField"] | null; /** - * VAE - * @description VAE model to load - * @default null + * Denoising Start + * @description When to start denoising, expressed as a percentage of total steps + * @default 0 */ - vae_model?: components["schemas"]["ModelIdentifierField"] | null; + denoising_start?: number; /** - * type - * @default flux_model_loader - * @constant + * Denoising End + * @description When to stop denoising, expressed as a percentage of total steps + * @default 1 */ - type: "flux_model_loader"; - }; - /** - * FluxModelLoaderOutput - * @description Flux base model loader output - */ - FluxModelLoaderOutput: { + denoising_end?: number; /** - * Transformer - * @description Transformer + * Add Noise + * @description Add noise based on denoising start.
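The InstantX control-mode integers documented on `FluxControlNetField` above are worth pinning down with an example. A sketch with a hypothetical helper; the `ImageField` and `ModelIdentifierField` values are taken as inputs rather than constructed here:

```ts
import type { components } from "./schema"; // assumed path to this generated file

type FluxControlNetField = components["schemas"]["FluxControlNetField"];

// Build a FluxControlNetField for an InstantX union model in depth mode.
// Standard mapping: canny 0, tile 1, depth 2, blur 3, pose 4, gray 5, low quality 6; negative = none.
function depthControl(
  image: components["schemas"]["ImageField"],
  model: components["schemas"]["ModelIdentifierField"],
): FluxControlNetField {
  return {
    image,
    control_model: model,
    control_weight: 0.8, // a number[] would schedule the weight per step
    begin_step_percent: 0,
    end_step_percent: 0.8, // release control for the last 20% of steps
    resize_mode: "just_resize",
    instantx_control_mode: 2, // depth; ignored by non-InstantX ControlNet models
  };
}
```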
+ * @default true */ - transformer: components["schemas"]["TransformerField"]; + add_noise?: boolean; /** - * CLIP - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * Transformer + * @description Flux model (Transformer) to load + * @default null */ - clip: components["schemas"]["CLIPField"]; + transformer?: components["schemas"]["TransformerField"] | null; /** - * T5 Encoder - * @description T5 tokenizer and text encoder + * Control LoRA + * @description Control LoRA model to load + * @default null */ - t5_encoder: components["schemas"]["T5EncoderField"]; + control_lora?: components["schemas"]["ControlLoRAField"] | null; /** - * VAE - * @description VAE + * Positive Text Conditioning + * @description Positive conditioning tensor + * @default null */ - vae: components["schemas"]["VAEField"]; + positive_text_conditioning?: components["schemas"]["FluxConditioningField"] | components["schemas"]["FluxConditioningField"][] | null; /** - * Max Seq Length - * @description The max sequence length to used for the T5 encoder. (256 for schnell transformer, 512 for dev transformer) - * @enum {integer} + * Negative Text Conditioning + * @description Negative conditioning tensor. Can be None if cfg_scale is 1.0. + * @default null */ - max_seq_len: 256 | 512; + negative_text_conditioning?: components["schemas"]["FluxConditioningField"] | components["schemas"]["FluxConditioningField"][] | null; /** - * type - * @default flux_model_loader_output - * @constant + * Redux Conditioning + * @description FLUX Redux conditioning tensor. + * @default null */ - type: "flux_model_loader_output"; - }; - /** - * FluxReduxConditioningField - * @description A FLUX Redux conditioning tensor primitive value - */ - FluxReduxConditioningField: { - /** @description The Redux image conditioning tensor. */ - conditioning: components["schemas"]["TensorField"]; + redux_conditioning?: components["schemas"]["FluxReduxConditioningField"] | components["schemas"]["FluxReduxConditioningField"][] | null; /** - * @description The mask associated with this conditioning tensor. Excluded regions should be set to False, included regions should be set to True. + * @description FLUX Fill conditioning. * @default null */ - mask?: components["schemas"]["TensorField"] | null; - }; - /** - * FluxReduxConfig - * @description Model config for FLUX Tools Redux model. - */ - FluxReduxConfig: { + fill_conditioning?: components["schemas"]["FluxFillConditioningField"] | null; /** - * Key - * @description A unique key for this model. + * CFG Scale + * @description Classifier-Free Guidance scale + * @default 1 */ - key: string; + cfg_scale?: number | number[]; /** - * Hash - * @description The hash of the model file(s). + * CFG Scale Start Step + * @description Index of the first step to apply cfg_scale. Negative indices count backwards from the last step (e.g. a value of -1 refers to the final step). + * @default 0 */ - hash: string; + cfg_scale_start_step?: number; /** - * Path - * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. + * CFG Scale End Step + * @description Index of the last step to apply cfg_scale. Negative indices count backwards from the last step (e.g. a value of -1 refers to the final step). + * @default -1 */ - path: string; + cfg_scale_end_step?: number; /** - * File Size - * @description The size of the model in bytes. + * Width + * @description Width of the generated image.
+ * @default 1024 */ - file_size: number; + width?: number; /** - * Name - * @description Name of the model. + * Height + * @description Height of the generated image. + * @default 1024 */ - name: string; + height?: number; /** - * Type - * @default flux_redux - * @constant + * Num Steps + * @description Number of diffusion steps. Recommended values are schnell: 4, dev: 50. + * @default 4 */ - type: "flux_redux"; + num_steps?: number; /** - * Format - * @default checkpoint - * @constant + * Guidance + * @description The guidance strength. Higher values adhere more strictly to the prompt, and will produce less diverse images. FLUX dev only, ignored for schnell. + * @default 4 */ - format: "checkpoint"; - /** @description The base model. */ - base: components["schemas"]["BaseModelType"]; + guidance?: number; /** - * Source - * @description The original source of the model (path, URL or repo_id). + * Seed + * @description Randomness seed for reproducibility. + * @default 0 */ - source: string; - /** @description The type of source */ - source_type: components["schemas"]["ModelSourceType"]; + seed?: number; /** - * Description - * @description Model description + * Control + * @description ControlNet models. + * @default null */ - description?: string | null; + control?: components["schemas"]["FluxControlNetField"] | components["schemas"]["FluxControlNetField"][] | null; /** - * Source Api Response - * @description The original API response from the source, as stringified JSON. + * @description VAE + * @default null */ - source_api_response?: string | null; + controlnet_vae?: components["schemas"]["VAEField"] | null; /** - * Cover Image - * @description Url for image to preview model + * IP-Adapter + * @description IP-Adapter to apply + * @default null */ - cover_image?: string | null; + ip_adapter?: components["schemas"]["IPAdapterField"] | components["schemas"]["IPAdapterField"][] | null; /** - * Submodels - * @description Loadable submodels in this model + * Kontext Conditioning + * @description FLUX Kontext conditioning (reference image). + * @default null */ - submodels?: { - [key: string]: components["schemas"]["SubmodelDefinition"]; - } | null; + kontext_conditioning?: components["schemas"]["FluxKontextConditioningField"] | components["schemas"]["FluxKontextConditioningField"][] | null; /** - * Usage Info - * @description Usage information for this model + * type + * @default flux_denoise + * @constant */ - usage_info?: string | null; + type: "flux_denoise"; }; /** - * FLUX Redux - * @description Runs a FLUX Redux model to generate a conditioning tensor. + * FLUX Denoise + Metadata + * @description Run denoising process with a FLUX transformer model + metadata. */ - FluxReduxInvocation: { + FluxDenoiseLatentsMetaInvocation: { + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -8958,137 +8616,161 @@ export type components = { */ use_cache?: boolean; /** - * @description The FLUX Redux image prompt. + * @description Latents tensor * @default null */ - image?: components["schemas"]["ImageField"] | null; + latents?: components["schemas"]["LatentsField"] | null; /** - * @description The bool mask associated with this FLUX Redux image prompt. Excluded regions should be set to False, included regions should be set to True. 
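With `flux_denoise` now fully specified, a dev-style configuration grounds the step-index conventions: negative `cfg_scale_end_step` values count from the end, so `-1` keeps CFG active through the final step. All values below are hypothetical:

```ts
import type { components } from "./schema"; // assumed path to this generated file

type FluxDenoiseInvocation = components["schemas"]["FluxDenoiseInvocation"];

const denoise: FluxDenoiseInvocation = {
  id: "flux_denoise_1", // hypothetical node id
  type: "flux_denoise",
  width: 1024,
  height: 1024,
  num_steps: 50, // recommended for dev; schnell wants ~4
  guidance: 4, // FLUX dev only, ignored for schnell
  cfg_scale: 1, // at 1.0 the negative conditioning may be omitted
  cfg_scale_start_step: 0,
  cfg_scale_end_step: -1, // -1 = final step
  denoising_start: 0,
  denoising_end: 1,
  seed: 42,
};
```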
+ * @description A mask of the region to apply the denoising process to. Values of 0.0 represent the regions to be fully denoised, and 1.0 represent the regions to be preserved. * @default null */ - mask?: components["schemas"]["TensorField"] | null; + denoise_mask?: components["schemas"]["DenoiseMaskField"] | null; /** - * FLUX Redux Model - * @description The FLUX Redux model to use. - * @default null + * Denoising Start + * @description When to start denoising, expressed as a percentage of total steps + * @default 0 */ - redux_model?: components["schemas"]["ModelIdentifierField"] | null; + denoising_start?: number; /** - * Downsampling Factor - * @description Redux Downsampling Factor (1-9) + * Denoising End + * @description When to stop denoising, expressed as a percentage of total steps * @default 1 */ - downsampling_factor?: number; + denoising_end?: number; /** - * Downsampling Function - * @description Redux Downsampling Function - * @default area - * @enum {string} + * Add Noise + * @description Add noise based on denoising start. + * @default true */ - downsampling_function?: "nearest" | "bilinear" | "bicubic" | "area" | "nearest-exact"; + add_noise?: boolean; /** - * Weight - * @description Redux weight (0.0-1.0) - * @default 1 + * Transformer + * @description Flux model (Transformer) to load + * @default null */ - weight?: number; + transformer?: components["schemas"]["TransformerField"] | null; /** - * type - * @default flux_redux - * @constant + * Control LoRA + * @description Control LoRA model to load + * @default null */ - type: "flux_redux"; - }; - /** - * FluxReduxOutput - * @description The conditioning output of a FLUX Redux invocation. - */ - FluxReduxOutput: { + control_lora?: components["schemas"]["ControlLoRAField"] | null; /** - * Conditioning - * @description FLUX Redux conditioning tensor - */ - redux_cond: components["schemas"]["FluxReduxConditioningField"]; + * Positive Text Conditioning + * @description Positive conditioning tensor + * @default null + */ + positive_text_conditioning?: components["schemas"]["FluxConditioningField"] | components["schemas"]["FluxConditioningField"][] | null; /** - * type - * @default flux_redux_output - * @constant + * Negative Text Conditioning + * @description Negative conditioning tensor. Can be None if cfg_scale is 1.0. + * @default null */ - type: "flux_redux_output"; - }; - /** - * Prompt - FLUX - * @description Encodes and preps a prompt for a flux image. - */ - FluxTextEncoderInvocation: { + negative_text_conditioning?: components["schemas"]["FluxConditioningField"] | components["schemas"]["FluxConditioningField"][] | null; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Redux Conditioning + * @description FLUX Redux conditioning tensor. + * @default null */ - id: string; + redux_conditioning?: components["schemas"]["FluxReduxConditioningField"] | components["schemas"]["FluxReduxConditioningField"][] | null; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * @description FLUX Fill conditioning.
+ * @default null */ - is_intermediate?: boolean; + fill_conditioning?: components["schemas"]["FluxFillConditioningField"] | null; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * CFG Scale + * @description Classifier-Free Guidance scale + * @default 1 */ - use_cache?: boolean; + cfg_scale?: number | number[]; /** - * CLIP - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null + * CFG Scale Start Step + * @description Index of the first step to apply cfg_scale. Negative indices count backwards from the last step (e.g. a value of -1 refers to the final step). + * @default 0 */ - clip?: components["schemas"]["CLIPField"] | null; + cfg_scale_start_step?: number; /** - * T5Encoder - * @description T5 tokenizer and text encoder + * CFG Scale End Step + * @description Index of the last step to apply cfg_scale. Negative indices count backwards from the last step (e.g. a value of -1 refers to the final step). + * @default -1 + */ + cfg_scale_end_step?: number; + /** + * Width + * @description Width of the generated image. + * @default 1024 + */ + width?: number; + /** + * Height + * @description Height of the generated image. + * @default 1024 + */ + height?: number; + /** + * Num Steps + * @description Number of diffusion steps. Recommended values are schnell: 4, dev: 50. + * @default 4 + */ + num_steps?: number; + /** + * Guidance + * @description The guidance strength. Higher values adhere more strictly to the prompt, and will produce less diverse images. FLUX dev only, ignored for schnell. + * @default 4 + */ + guidance?: number; + /** + * Seed + * @description Randomness seed for reproducibility. + * @default 0 + */ + seed?: number; + /** + * Control + * @description ControlNet models. * @default null */ - t5_encoder?: components["schemas"]["T5EncoderField"] | null; + control?: components["schemas"]["FluxControlNetField"] | components["schemas"]["FluxControlNetField"][] | null; /** - * T5 Max Seq Len - * @description Max sequence length for the T5 encoder. Expected to be 256 for FLUX schnell models and 512 for FLUX dev models. + * @description VAE * @default null */ - t5_max_seq_len?: (256 | 512) | null; + controlnet_vae?: components["schemas"]["VAEField"] | null; /** - * Prompt - * @description Text prompt to encode. + * IP-Adapter + * @description IP-Adapter to apply * @default null */ - prompt?: string | null; + ip_adapter?: components["schemas"]["IPAdapterField"] | components["schemas"]["IPAdapterField"][] | null; /** - * @description A mask defining the region that this conditioning prompt applies to. + * Kontext Conditioning + * @description FLUX Kontext conditioning (reference image). * @default null */ - mask?: components["schemas"]["TensorField"] | null; + kontext_conditioning?: components["schemas"]["FluxKontextConditioningField"] | components["schemas"]["FluxKontextConditioningField"][] | null; /** * type - * @default flux_text_encoder + * @default flux_denoise_meta * @constant */ - type: "flux_text_encoder"; + type: "flux_denoise_meta"; }; /** - * Latents to Image - FLUX - * @description Generates an image from latents. + * FluxFillConditioningField + * @description A FLUX Fill conditioning field.
*/ - FluxVaeDecodeInvocation: { - /** - * @description The board to save the image to - * @default null - */ - board?: components["schemas"]["BoardField"] | null; - /** - * @description Optional metadata to be saved with the image - * @default null - */ - metadata?: components["schemas"]["MetadataField"] | null; + FluxFillConditioningField: { + /** @description The FLUX Fill reference image. */ + image: components["schemas"]["ImageField"]; + /** @description The FLUX Fill inpaint mask. */ + mask: components["schemas"]["TensorField"]; + }; + /** + * FLUX Fill Conditioning + * @description Prepare the FLUX Fill conditioning data. + */ + FluxFillInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -9107,27 +8789,44 @@ export type components = { */ use_cache?: boolean; /** - * @description Latents tensor + * @description The FLUX Fill reference image. * @default null */ - latents?: components["schemas"]["LatentsField"] | null; + image?: components["schemas"]["ImageField"] | null; /** - * @description VAE + * @description The bool inpainting mask. Excluded regions should be set to False, included regions should be set to True. * @default null */ - vae?: components["schemas"]["VAEField"] | null; + mask?: components["schemas"]["TensorField"] | null; /** * type - * @default flux_vae_decode + * @default flux_fill * @constant */ - type: "flux_vae_decode"; + type: "flux_fill"; }; /** - * Image to Latents - FLUX - * @description Encodes an image into latents. + * FluxFillOutput + * @description The conditioning output of a FLUX Fill invocation. */ - FluxVaeEncodeInvocation: { + FluxFillOutput: { + /** + * Conditioning + * @description FLUX Fill conditioning tensor + */ + fill_cond: components["schemas"]["FluxFillConditioningField"]; + /** + * type + * @default flux_fill_output + * @constant + */ + type: "flux_fill_output"; + }; + /** + * FLUX IP-Adapter + * @description Collects FLUX IP-Adapter info to pass to other nodes. + */ + FluxIPAdapterInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -9146,72 +8845,64 @@ export type components = { */ use_cache?: boolean; /** - * @description The image to encode. + * @description The IP-Adapter image prompt(s). * @default null */ image?: components["schemas"]["ImageField"] | null; /** - * @description VAE + * IP-Adapter Model + * @description The IP-Adapter model. * @default null */ - vae?: components["schemas"]["VAEField"] | null; + ip_adapter_model?: components["schemas"]["ModelIdentifierField"] | null; /** - * type - * @default flux_vae_encode + * Clip Vision Model + * @description CLIP Vision model to use. + * @default ViT-L * @constant */ - type: "flux_vae_encode"; - }; - /** FoundModel */ - FoundModel: { + clip_vision_model?: "ViT-L"; /** - * Path - * @description Path to the model + * Weight + * @description The weight given to the IP-Adapter + * @default 1 */ - path: string; + weight?: number | number[]; /** - * Is Installed - * @description Whether or not the model is already installed + * Begin Step Percent + * @description When the IP-Adapter is first applied (% of total steps) + * @default 0 */ - is_installed: boolean; - }; - /** - * FreeUConfig - * @description Configuration for the FreeU hyperparameters.
- * - https://huggingface.co/docs/diffusers/main/en/using-diffusers/freeu - * - https://github.com/ChenyangSi/FreeU - */ - FreeUConfig: { + begin_step_percent?: number; /** - * S1 - * @description Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process. + * End Step Percent + * @description When the IP-Adapter is last applied (% of total steps) + * @default 1 */ - s1: number; + end_step_percent?: number; /** - * S2 - * @description Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process. + * type + * @default flux_ip_adapter + * @constant */ - s2: number; + type: "flux_ip_adapter"; + }; + /** + * FLUX Kontext Image Prep + * @description Prepares an image or images for use with FLUX Kontext. The first/single image is resized to the nearest + * preferred Kontext resolution. All other images are concatenated horizontally, maintaining their aspect ratio. + */ + FluxKontextConcatenateImagesInvocation: { /** - * B1 - * @description Scaling factor for stage 1 to amplify the contributions of backbone features. + * @description The board to save the image to + * @default null */ - b1: number; + board?: components["schemas"]["BoardField"] | null; /** - * B2 - * @description Scaling factor for stage 2 to amplify the contributions of backbone features. + * @description Optional metadata to be saved with the image + * @default null */ - b2: number; - }; - /** - * Apply FreeU - SD1.5, SDXL - * @description Applies FreeU to the UNet. Suggested values (b1/b2/s1/s2): - * - * SD1.5: 1.2/1.4/0.9/0.2, - * SD2: 1.1/1.2/0.9/0.2, - * SDXL: 1.1/1.2/0.6/0.4, - */ - FreeUInvocation: { + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -9230,47 +8921,37 @@ export type components = { */ use_cache?: boolean; /** - * UNet - * @description UNet (scheduler, LoRAs) + * Images + * @description The images to concatenate * @default null */ - unet?: components["schemas"]["UNetField"] | null; - /** - * B1 - * @description Scaling factor for stage 1 to amplify the contributions of backbone features. - * @default 1.2 - */ - b1?: number; - /** - * B2 - * @description Scaling factor for stage 2 to amplify the contributions of backbone features. - * @default 1.4 - */ - b2?: number; - /** - * S1 - * @description Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process. - * @default 0.9 - */ - s1?: number; + images?: components["schemas"]["ImageField"][] | null; /** - * S2 - * @description Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process. - * @default 0.2 + * Use Preferred Resolution + * @description Use FLUX preferred resolutions for the first image + * @default true */ - s2?: number; + use_preferred_resolution?: boolean; /** * type - * @default freeu + * @default flux_kontext_image_prep * @constant */ - type: "freeu"; + type: "flux_kontext_image_prep"; }; /** - * Get Image Mask Bounding Box - * @description Gets the bounding box of the given mask image. + * FluxKontextConditioningField + * @description A conditioning field for FLUX Kontext (reference image). 
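A sketch of the Kontext image-prep node defined above, as a hypothetical helper that takes its `ImageField` inputs from the caller: the first/single image is snapped to a preferred Kontext resolution and any remaining images are concatenated horizontally.

```ts
import type { components } from "./schema"; // assumed path to this generated file

type FluxKontextConcatenateImagesInvocation =
  components["schemas"]["FluxKontextConcatenateImagesInvocation"];

function prepKontextRefs(
  images: components["schemas"]["ImageField"][],
): FluxKontextConcatenateImagesInvocation {
  return {
    id: "flux_kontext_image_prep_1", // hypothetical node id
    type: "flux_kontext_image_prep",
    images,
    use_preferred_resolution: true, // resize the first image to a preferred resolution
  };
}
```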
*/ - GetMaskBoundingBoxInvocation: { + FluxKontextConditioningField: { + /** @description The Kontext reference image. */ + image: components["schemas"]["ImageField"]; + }; + /** + * Kontext Conditioning - FLUX + * @description Prepares a reference image for FLUX Kontext conditioning. + */ + FluxKontextInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -9289,134 +8970,39 @@ export type components = { */ use_cache?: boolean; /** - * @description The mask to crop. + * @description The Kontext reference image. * @default null */ - mask?: components["schemas"]["ImageField"] | null; - /** - * Margin - * @description Margin to add to the bounding box. - * @default 0 - */ - margin?: number; - /** - * @description Color of the mask in the image. - * @default { - * "r": 255, - * "g": 255, - * "b": 255, - * "a": 255 - * } - */ - mask_color?: components["schemas"]["ColorField"]; - /** - * type - * @default get_image_mask_bounding_box - * @constant - */ - type: "get_image_mask_bounding_box"; - }; - /** GlmEncoderField */ - GlmEncoderField: { - /** @description Info to load tokenizer submodel */ - tokenizer: components["schemas"]["ModelIdentifierField"]; - /** @description Info to load text_encoder submodel */ - text_encoder: components["schemas"]["ModelIdentifierField"]; - }; - /** - * GradientMaskOutput - * @description Outputs a denoise mask and an image representing the total gradient of the mask. - */ - GradientMaskOutput: { - /** @description Mask for denoise model run. Values of 0.0 represent the regions to be fully denoised, and 1.0 represent the regions to be preserved. */ - denoise_mask: components["schemas"]["DenoiseMaskField"]; - /** @description Image representing the total gradient area of the mask. For paste-back purposes. 
*/ - expanded_mask_area: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * type - * @default gradient_mask_output + * @default flux_kontext * @constant */ - type: "gradient_mask_output"; - }; - /** Graph */ - Graph: { - /** - * Id - * @description The id of this graph - */ - id?: string; - /** - * Nodes - * @description The nodes in this graph - */ - nodes?: { - [key: string]: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | 
components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | 
components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | 
components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"]; - }; - /** - * Edges - * @description The connections between nodes and their fields in this graph - */ - edges?: components["schemas"]["Edge"][]; + type: "flux_kontext"; }; /** - * GraphExecutionState - * @description Tracks the state of a graph execution + * FluxKontextOutput + * @description The conditioning output of a FLUX Kontext invocation. */ - GraphExecutionState: { - /** - * Id - * @description The id of the execution state - */ - id: string; - /** @description The graph being executed */ - graph: components["schemas"]["Graph"]; - /** @description The expanded graph of activated and executed nodes */ - execution_graph: components["schemas"]["Graph"]; - /** - * Executed - * @description The set of node ids that have been executed - */ - executed: string[]; - /** - * Executed History - * @description The list of node ids that have been executed, in order of execution - */ - executed_history: string[]; - /** - * Results - * @description The results of node executions - */ - results: { - [key: string]: components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["BoundingBoxCollectionOutput"] | components["schemas"]["BoundingBoxOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["CogView4ConditioningOutput"] | components["schemas"]["CogView4ModelLoaderOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FloatGeneratorOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["FluxConditioningCollectionOutput"] | components["schemas"]["FluxConditioningOutput"] | components["schemas"]["FluxControlLoRALoaderOutput"] | components["schemas"]["FluxControlNetOutput"] | components["schemas"]["FluxFillOutput"] | components["schemas"]["FluxKontextOutput"] | components["schemas"]["FluxLoRALoaderOutput"] | components["schemas"]["FluxModelLoaderOutput"] | components["schemas"]["FluxReduxOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ImageGeneratorOutput"] | 
components["schemas"]["ImageOutput"] | components["schemas"]["ImagePanelCoordinateOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IntegerGeneratorOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LatentsMetaOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["LoRASelectorOutput"] | components["schemas"]["MDControlListOutput"] | components["schemas"]["MDIPAdapterListOutput"] | components["schemas"]["MDT2IAdapterListOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["MetadataToLorasCollectionOutput"] | components["schemas"]["MetadataToModelOutput"] | components["schemas"]["MetadataToSDXLModelOutput"] | components["schemas"]["ModelIdentifierOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["SD3ConditioningOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["Sd3ModelLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["String2Output"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["StringGeneratorOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["VideoOutput"]; - }; - /** - * Errors - * @description Errors raised when executing nodes - */ - errors: { - [key: string]: string; - }; + FluxKontextOutput: { /** - * Prepared Source Mapping - * @description The map of prepared nodes to original graph nodes + * Kontext Conditioning + * @description FLUX Kontext conditioning (reference image) */ - prepared_source_mapping: { - [key: string]: string; - }; + kontext_cond: components["schemas"]["FluxKontextConditioningField"]; /** - * Source Prepared Mapping - * @description The map of original graph nodes to prepared nodes + * type + * @default flux_kontext_output + * @constant */ - source_prepared_mapping: { - [key: string]: string[]; - }; + type: "flux_kontext_output"; }; /** - * Grounding DINO (Text Prompt Object Detection) - * @description Runs a Grounding DINO model. Performs zero-shot bounding-box object detection from a text prompt. + * Apply LoRA - FLUX + * @description Apply a LoRA model to a FLUX transformer and/or text encoder. */ - GroundingDinoInvocation: { + FluxLoRALoaderInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -9435,50 +9021,77 @@ export type components = { */ use_cache?: boolean; /** - * Model - * @description The Grounding DINO model to use. + * LoRA + * @description LoRA model to load * @default null */ - model?: ("grounding-dino-tiny" | "grounding-dino-base") | null; + lora?: components["schemas"]["ModelIdentifierField"] | null; /** - * Prompt - * @description The prompt describing the object to segment. 
+ * Weight + * @description The weight at which the LoRA is applied to each model + * @default 0.75 + */ + weight?: number; + /** + * FLUX Transformer + * @description Transformer * @default null */ - prompt?: string | null; + transformer?: components["schemas"]["TransformerField"] | null; /** - * @description The image to segment. + * CLIP + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count * @default null */ - image?: components["schemas"]["ImageField"] | null; + clip?: components["schemas"]["CLIPField"] | null; /** - * Detection Threshold - * @description The detection threshold for the Grounding DINO model. All detected bounding boxes with scores above this threshold will be returned. - * @default 0.3 + * T5 Encoder + * @description T5 tokenizer and text encoder + * @default null */ - detection_threshold?: number; + t5_encoder?: components["schemas"]["T5EncoderField"] | null; /** * type - * @default grounding_dino + * @default flux_lora_loader * @constant */ - type: "grounding_dino"; + type: "flux_lora_loader"; }; /** - * HED Edge Detection - * @description Geneartes an edge map using the HED (softedge) model. + * FluxLoRALoaderOutput + * @description FLUX LoRA Loader Output */ - HEDEdgeDetectionInvocation: { + FluxLoRALoaderOutput: { /** - * @description The board to save the image to + * FLUX Transformer + * @description Transformer * @default null */ - board?: components["schemas"]["BoardField"] | null; + transformer: components["schemas"]["TransformerField"] | null; /** - * @description Optional metadata to be saved with the image + * CLIP + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count * @default null */ - metadata?: components["schemas"]["MetadataField"] | null; + clip: components["schemas"]["CLIPField"] | null; + /** + * T5 Encoder + * @description T5 tokenizer and text encoder + * @default null + */ + t5_encoder: components["schemas"]["T5EncoderField"] | null; + /** + * type + * @default flux_lora_loader_output + * @constant + */ + type: "flux_lora_loader_output"; + }; + /** + * Main Model - FLUX + * @description Loads a flux base model, outputting its submodels. + */ + FluxModelLoaderInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -9496,60 +9109,87 @@ export type components = { * @default true */ use_cache?: boolean; + /** @description Flux model (Transformer) to load */ + model: components["schemas"]["ModelIdentifierField"]; /** - * @description The image to process - * @default null + * T5 Encoder + * @description T5 tokenizer and text encoder */ - image?: components["schemas"]["ImageField"] | null; + t5_encoder_model: components["schemas"]["ModelIdentifierField"]; /** - * Scribble - * @description Whether or not to use scribble mode - * @default false + * CLIP Embed + * @description CLIP Embed loader */ - scribble?: boolean; + clip_embed_model: components["schemas"]["ModelIdentifierField"]; + /** + * VAE + * @description VAE model to load + * @default null + */ + vae_model?: components["schemas"]["ModelIdentifierField"] | null; /** * type - * @default hed_edge_detection + * @default flux_model_loader * @constant */ - type: "hed_edge_detection"; + type: "flux_model_loader"; }; /** - * HFModelSource - * @description A HuggingFace repo_id with optional variant, sub-folder and access token. - * Note that the variant option, if not provided to the constructor, will default to fp16, which is - * what people (almost) always want. 
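Because `flux_lora_loader` both consumes and produces transformer/CLIP/T5 fields, LoRAs stack by threading one loader's output into the next. In a real workflow these connections are graph edges; embedding the field values directly, as in this hypothetical helper, is only for illustration:

```ts
import type { components } from "./schema"; // assumed path to this generated file

type FluxLoRALoaderInvocation = components["schemas"]["FluxLoRALoaderInvocation"];
type FluxLoRALoaderOutput = components["schemas"]["FluxLoRALoaderOutput"];

// Chain a LoRA onto the models produced by a previous loader.
function stackLora(
  id: string,
  lora: components["schemas"]["ModelIdentifierField"],
  prev: FluxLoRALoaderOutput,
): FluxLoRALoaderInvocation {
  return {
    id,
    type: "flux_lora_loader",
    lora,
    weight: 0.5, // the @default is 0.75
    transformer: prev.transformer,
    clip: prev.clip,
    t5_encoder: prev.t5_encoder,
  };
}
```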
+ * FluxModelLoaderOutput + * @description Flux base model loader output */ - HFModelSource: { - /** Repo Id */ - repo_id: string; - /** @default fp16 */ - variant?: components["schemas"]["ModelRepoVariant"] | null; - /** Subfolder */ - subfolder?: string | null; - /** Access Token */ - access_token?: string | null; + FluxModelLoaderOutput: { /** - * @description discriminator enum property added by openapi-typescript - * @enum {string} + * Transformer + * @description Transformer */ - type: "hf"; + transformer: components["schemas"]["TransformerField"]; + /** + * CLIP + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + */ + clip: components["schemas"]["CLIPField"]; + /** + * T5 Encoder + * @description T5 tokenizer and text encoder + */ + t5_encoder: components["schemas"]["T5EncoderField"]; + /** + * VAE + * @description VAE + */ + vae: components["schemas"]["VAEField"]; + /** + * Max Seq Length + * @description The max sequence length to use for the T5 encoder. (256 for schnell transformer, 512 for dev transformer) + * @enum {integer} + */ + max_seq_len: 256 | 512; + /** + * type + * @default flux_model_loader_output + * @constant + */ + type: "flux_model_loader_output"; }; /** - * HFTokenStatus - * @enum {string} + * FluxReduxConditioningField + * @description A FLUX Redux conditioning tensor primitive value */ - HFTokenStatus: "valid" | "invalid" | "unknown"; - /** HTTPValidationError */ - HTTPValidationError: { - /** Detail */ - detail?: components["schemas"]["ValidationError"][]; + FluxReduxConditioningField: { + /** @description The Redux image conditioning tensor. */ + conditioning: components["schemas"]["TensorField"]; + /** + * @description The mask associated with this conditioning tensor. Excluded regions should be set to False, included regions should be set to True. + * @default null + */ + mask?: components["schemas"]["TensorField"] | null; }; /** - * Heuristic Resize - * @description Resize an image using a heuristic method. Preserves edge maps. + * FLUX Redux + * @description Runs a FLUX Redux model to generate a conditioning tensor. */ - HeuristicResizeInvocation: { + FluxReduxInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -9568,215 +9208,265 @@ export type components = { */ use_cache?: boolean; /** - * @description The image to resize + * @description The FLUX Redux image prompt. * @default null */ image?: components["schemas"]["ImageField"] | null; /** - * Width - * @description The width to resize to (px) - * @default 512 + * @description The bool mask associated with this FLUX Redux image prompt. Excluded regions should be set to False, included regions should be set to True. + * @default null */ - width?: number; + mask?: components["schemas"]["TensorField"] | null; /** - * Height - * @description The height to resize to (px) - * @default 512 + * FLUX Redux Model + * @description The FLUX Redux model to use.
+ * @default null */ - height?: number; + redux_model?: components["schemas"]["ModelIdentifierField"] | null; + /** + * Downsampling Factor + * @description Redux Downsampling Factor (1-9) + * @default 1 + */ + downsampling_factor?: number; + /** + * Downsampling Function + * @description Redux Downsampling Function + * @default area + * @enum {string} + */ + downsampling_function?: "nearest" | "bilinear" | "bicubic" | "area" | "nearest-exact"; + /** + * Weight + * @description Redux weight (0.0-1.0) + * @default 1 + */ + weight?: number; /** * type - * @default heuristic_resize + * @default flux_redux * @constant */ - type: "heuristic_resize"; + type: "flux_redux"; }; /** - * HuggingFaceMetadata - * @description Extended metadata fields provided by HuggingFace. + * FluxReduxOutput + * @description The conditioning output of a FLUX Redux invocation. */ - HuggingFaceMetadata: { - /** - * Name - * @description model's name - */ - name: string; + FluxReduxOutput: { /** - * Files - * @description model files and their sizes + * Conditioning + * @description FLUX Redux conditioning tensor */ - files?: components["schemas"]["RemoteModelFile"][]; + redux_cond: components["schemas"]["FluxReduxConditioningField"]; /** - * @description discriminator enum property added by openapi-typescript - * @enum {string} + * type + * @default flux_redux_output + * @constant */ - type: "huggingface"; + type: "flux_redux_output"; + }; + /** + * Prompt - FLUX + * @description Encodes and preps a prompt for a flux image. + */ + FluxTextEncoderInvocation: { /** * Id - * @description The HF model id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ id: string; /** - * Api Response - * @description Response from the HF API as stringified JSON + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - api_response?: string | null; + is_intermediate?: boolean; /** - * Is Diffusers - * @description Whether the metadata is for a Diffusers format model - * @default false + * Use Cache + * @description Whether or not to use the cache + * @default true */ - is_diffusers?: boolean; + use_cache?: boolean; /** - * Ckpt Urls - * @description URLs for all checkpoint format models in the metadata + * CLIP + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null */ - ckpt_urls?: string[] | null; - }; - /** HuggingFaceModels */ - HuggingFaceModels: { + clip?: components["schemas"]["CLIPField"] | null; /** - * Urls - * @description URLs for all checkpoint format models in the metadata + * T5Encoder + * @description T5 tokenizer and text encoder + * @default null */ - urls: string[] | null; + t5_encoder?: components["schemas"]["T5EncoderField"] | null; /** - * Is Diffusers - * @description Whether the metadata is for a Diffusers format model + * T5 Max Seq Len + * @description Max sequence length for the T5 encoder. Expected to be 256 for FLUX schnell models and 512 for FLUX dev models. + * @default null */ - is_diffusers: boolean; - }; - /** - * IPAdapterCheckpointConfig - * @description Model config for IP Adapter checkpoint format models. - */ - IPAdapterCheckpointConfig: { + t5_max_seq_len?: (256 | 512) | null; /** - * Key - * @description A unique key for this model. + * Prompt + * @description Text prompt to encode. + * @default null */ - key: string; + prompt?: string | null; /** - * Hash - * @description The hash of the model file(s). 
+ * @description A mask defining the region that this conditioning prompt applies to. + * @default null */ - hash: string; + mask?: components["schemas"]["TensorField"] | null; /** - * Path - * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. + * type + * @default flux_text_encoder + * @constant */ - path: string; + type: "flux_text_encoder"; + }; + /** + * Latents to Image - FLUX + * @description Generates an image from latents. + */ + FluxVaeDecodeInvocation: { /** - * File Size - * @description The size of the model in bytes. + * @description The board to save the image to + * @default null */ - file_size: number; + board?: components["schemas"]["BoardField"] | null; /** - * Name - * @description Name of the model. + * @description Optional metadata to be saved with the image + * @default null */ - name: string; + metadata?: components["schemas"]["MetadataField"] | null; /** - * Type - * @default ip_adapter - * @constant + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - type: "ip_adapter"; + id: string; /** - * Format - * @default checkpoint - * @constant + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - format: "checkpoint"; - /** @description The base model. */ - base: components["schemas"]["BaseModelType"]; + is_intermediate?: boolean; /** - * Source - * @description The original source of the model (path, URL or repo_id). + * Use Cache + * @description Whether or not to use the cache + * @default true */ - source: string; - /** @description The type of source */ - source_type: components["schemas"]["ModelSourceType"]; + use_cache?: boolean; /** - * Description - * @description Model description + * @description Latents tensor + * @default null */ - description?: string | null; + latents?: components["schemas"]["LatentsField"] | null; /** - * Source Api Response - * @description The original API response from the source, as stringified JSON. + * @description VAE + * @default null */ - source_api_response?: string | null; + vae?: components["schemas"]["VAEField"] | null; /** - * Cover Image - * @description Url for image to preview model + * type + * @default flux_vae_decode + * @constant */ - cover_image?: string | null; + type: "flux_vae_decode"; + }; + /** + * Image to Latents - FLUX + * @description Encodes an image into latents. + */ + FluxVaeEncodeInvocation: { /** - * Submodels - * @description Loadable submodels in this model + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - submodels?: { - [key: string]: components["schemas"]["SubmodelDefinition"]; - } | null; + id: string; /** - * Usage Info - * @description Usage information for this model + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - usage_info?: string | null; - }; - /** IPAdapterField */ - IPAdapterField: { + is_intermediate?: boolean; /** - * Image - * @description The IP-Adapter image prompt(s). + * Use Cache + * @description Whether or not to use the cache + * @default true */ - image: components["schemas"]["ImageField"] | components["schemas"]["ImageField"][]; - /** @description The IP-Adapter model to use. */ - ip_adapter_model: components["schemas"]["ModelIdentifierField"]; - /** @description The name of the CLIP image encoder model. 
*/ - image_encoder_model: components["schemas"]["ModelIdentifierField"]; + use_cache?: boolean; /** - * Weight - * @description The weight given to the IP-Adapter. - * @default 1 + * @description The image to encode. + * @default null */ - weight?: number | number[]; + image?: components["schemas"]["ImageField"] | null; /** - * Target Blocks - * @description The IP Adapter blocks to apply - * @default [] + * @description VAE + * @default null */ - target_blocks?: string[]; + vae?: components["schemas"]["VAEField"] | null; /** - * Method - * @description Weight apply method - * @default full + * type + * @default flux_vae_encode + * @constant */ - method?: string; + type: "flux_vae_encode"; + }; + /** + * FluxVariantType + * @enum {string} + */ + FluxVariantType: "schnell" | "dev" | "dev_fill"; + /** FoundModel */ + FoundModel: { /** - * Begin Step Percent - * @description When the IP-Adapter is first applied (% of total steps) - * @default 0 + * Path + * @description Path to the model */ - begin_step_percent?: number; + path: string; /** - * End Step Percent - * @description When the IP-Adapter is last applied (% of total steps) - * @default 1 + * Is Installed + * @description Whether or not the model is already installed */ - end_step_percent?: number; + is_installed: boolean; + }; + /** + * FreeUConfig + * @description Configuration for the FreeU hyperparameters. + * - https://huggingface.co/docs/diffusers/main/en/using-diffusers/freeu + * - https://github.com/ChenyangSi/FreeU + */ + FreeUConfig: { /** - * @description The bool mask associated with this IP-Adapter. Excluded regions should be set to False, included regions should be set to True. - * @default null + * S1 + * @description Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process. */ - mask?: components["schemas"]["TensorField"] | null; + s1: number; + /** + * S2 + * @description Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process. + */ + s2: number; + /** + * B1 + * @description Scaling factor for stage 1 to amplify the contributions of backbone features. + */ + b1: number; + /** + * B2 + * @description Scaling factor for stage 2 to amplify the contributions of backbone features. + */ + b2: number; }; /** - * IP-Adapter - SD1.5, SDXL - * @description Collects IP-Adapter info to pass to other nodes. + * Apply FreeU - SD1.5, SDXL + * @description Applies FreeU to the UNet. Suggested values (b1/b2/s1/s2): + * + * SD1.5: 1.2/1.4/0.9/0.2, + * SD2: 1.1/1.2/0.9/0.2, + * SDXL: 1.1/1.2/0.6/0.4, */ - IPAdapterInvocation: { + FreeUInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -9795,272 +9485,193 @@ export type components = { */ use_cache?: boolean; /** - * Image - * @description The IP-Adapter image prompt(s). - * @default null - */ - image?: components["schemas"]["ImageField"] | components["schemas"]["ImageField"][] | null; - /** - * IP-Adapter Model - * @description The IP-Adapter model. + * UNet + * @description UNet (scheduler, LoRAs) * @default null */ - ip_adapter_model?: components["schemas"]["ModelIdentifierField"] | null; - /** - * Clip Vision Model - * @description CLIP Vision model to use. Overrides model settings. Mandatory for checkpoint models. 
- * @default ViT-H - * @enum {string} - */ - clip_vision_model?: "ViT-H" | "ViT-G" | "ViT-L"; - /** - * Weight - * @description The weight given to the IP-Adapter - * @default 1 - */ - weight?: number | number[]; + unet?: components["schemas"]["UNetField"] | null; /** - * Method - * @description The method to apply the IP-Adapter - * @default full - * @enum {string} + * B1 + * @description Scaling factor for stage 1 to amplify the contributions of backbone features. + * @default 1.2 */ - method?: "full" | "style" | "composition" | "style_strong" | "style_precise"; + b1?: number; /** - * Begin Step Percent - * @description When the IP-Adapter is first applied (% of total steps) - * @default 0 + * B2 + * @description Scaling factor for stage 2 to amplify the contributions of backbone features. + * @default 1.4 */ - begin_step_percent?: number; + b2?: number; /** - * End Step Percent - * @description When the IP-Adapter is last applied (% of total steps) - * @default 1 + * S1 + * @description Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process. + * @default 0.9 */ - end_step_percent?: number; + s1?: number; /** - * @description A mask defining the region that this IP-Adapter applies to. - * @default null + * S2 + * @description Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process. + * @default 0.2 */ - mask?: components["schemas"]["TensorField"] | null; + s2?: number; /** * type - * @default ip_adapter + * @default freeu * @constant */ - type: "ip_adapter"; + type: "freeu"; }; /** - * IPAdapterInvokeAIConfig - * @description Model config for IP Adapter diffusers format models. + * Get Image Mask Bounding Box + * @description Gets the bounding box of the given mask image. */ - IPAdapterInvokeAIConfig: { + GetMaskBoundingBoxInvocation: { /** - * Key - * @description A unique key for this model. + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - key: string; + id: string; /** - * Hash - * @description The hash of the model file(s). + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - hash: string; + is_intermediate?: boolean; /** - * Path - * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. + * Use Cache + * @description Whether or not to use the cache + * @default true */ - path: string; + use_cache?: boolean; /** - * File Size - * @description The size of the model in bytes. + * @description The mask to crop. + * @default null */ - file_size: number; + mask?: components["schemas"]["ImageField"] | null; /** - * Name - * @description Name of the model. + * Margin + * @description Margin to add to the bounding box. + * @default 0 */ - name: string; + margin?: number; /** - * Type - * @default ip_adapter - * @constant + * @description Color of the mask in the image. + * @default { + * "r": 255, + * "g": 255, + * "b": 255, + * "a": 255 + * } */ - type: "ip_adapter"; + mask_color?: components["schemas"]["ColorField"]; /** - * Format - * @default invokeai + * type + * @default get_image_mask_bounding_box * @constant */ - format: "invokeai"; - /** @description The base model. 
*/ - base: components["schemas"]["BaseModelType"]; - /** - * Source - * @description The original source of the model (path, URL or repo_id). - */ - source: string; - /** @description The type of source */ - source_type: components["schemas"]["ModelSourceType"]; - /** - * Description - * @description Model description - */ - description?: string | null; - /** - * Source Api Response - * @description The original API response from the source, as stringified JSON. - */ - source_api_response?: string | null; - /** - * Cover Image - * @description Url for image to preview model - */ - cover_image?: string | null; - /** - * Submodels - * @description Loadable submodels in this model - */ - submodels?: { - [key: string]: components["schemas"]["SubmodelDefinition"]; - } | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info?: string | null; - /** Image Encoder Model Id */ - image_encoder_model_id: string; + type: "get_image_mask_bounding_box"; + }; + /** GlmEncoderField */ + GlmEncoderField: { + /** @description Info to load tokenizer submodel */ + tokenizer: components["schemas"]["ModelIdentifierField"]; + /** @description Info to load text_encoder submodel */ + text_encoder: components["schemas"]["ModelIdentifierField"]; }; /** - * IPAdapterMetadataField - * @description IP Adapter Field, minus the CLIP Vision Encoder model + * GradientMaskOutput + * @description Outputs a denoise mask and an image representing the total gradient of the mask. */ - IPAdapterMetadataField: { - /** @description The IP-Adapter image prompt. */ - image: components["schemas"]["ImageField"]; - /** @description The IP-Adapter model. */ - ip_adapter_model: components["schemas"]["ModelIdentifierField"]; - /** - * Clip Vision Model - * @description The CLIP Vision model - * @enum {string} - */ - clip_vision_model: "ViT-L" | "ViT-H" | "ViT-G"; - /** - * Method - * @description Method to apply IP Weights with - * @enum {string} - */ - method: "full" | "style" | "composition" | "style_strong" | "style_precise"; - /** - * Weight - * @description The weight given to the IP-Adapter - */ - weight: number | number[]; + GradientMaskOutput: { + /** @description Mask for denoise model run. Values of 0.0 represent the regions to be fully denoised, and 1.0 represent the regions to be preserved. */ + denoise_mask: components["schemas"]["DenoiseMaskField"]; + /** @description Image representing the total gradient area of the mask. For paste-back purposes. 
*/ + expanded_mask_area: components["schemas"]["ImageField"]; /** - * Begin Step Percent - * @description When the IP-Adapter is first applied (% of total steps) + * type + * @default gradient_mask_output + * @constant */ - begin_step_percent: number; + type: "gradient_mask_output"; + }; + /** Graph */ + Graph: { /** - * End Step Percent - * @description When the IP-Adapter is last applied (% of total steps) + * Id + * @description The id of this graph */ - end_step_percent: number; - }; - /** IPAdapterOutput */ - IPAdapterOutput: { + id?: string; /** - * IP-Adapter - * @description IP-Adapter to apply + * Nodes + * @description The nodes in this graph */ - ip_adapter: components["schemas"]["IPAdapterField"]; + nodes?: { + [key: string]: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | 
components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | 
components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | 
components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"]; + }; /** - * type - * @default ip_adapter_output - * @constant + * Edges + * @description The connections between nodes and their fields in this graph */ - type: "ip_adapter_output"; + edges?: components["schemas"]["Edge"][]; }; /** - * Ideal Size - SD1.5, SDXL - * @description Calculates the ideal size for generation to avoid duplication + * GraphExecutionState + * @description Tracks the state of a graph execution */ - IdealSizeInvocation: { + GraphExecutionState: { /** * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * @description The id of the execution state */ id: string; + /** @description The graph being executed */ + graph: components["schemas"]["Graph"]; + /** @description The expanded graph of activated and executed nodes */ + execution_graph: components["schemas"]["Graph"]; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. 
- * @default false - */ - is_intermediate?: boolean; - /** - * Use Cache - * @description Whether or not to use the cache - * @default true - */ - use_cache?: boolean; - /** - * Width - * @description Final image width - * @default 1024 - */ - width?: number; - /** - * Height - * @description Final image height - * @default 576 - */ - height?: number; - /** - * @description UNet (scheduler, LoRAs) - * @default null + * Executed + * @description The set of node ids that have been executed */ - unet?: components["schemas"]["UNetField"] | null; + executed: string[]; /** - * Multiplier - * @description Amount to multiply the model's dimensions by when calculating the ideal size (may result in initial generation artifacts if too large) - * @default 1 + * Executed History + * @description The list of node ids that have been executed, in order of execution */ - multiplier?: number; + executed_history: string[]; /** - * type - * @default ideal_size - * @constant + * Results + * @description The results of node executions */ - type: "ideal_size"; - }; - /** - * IdealSizeOutput - * @description Base class for invocations that output an image - */ - IdealSizeOutput: { + results: { + [key: string]: components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["BoundingBoxCollectionOutput"] | components["schemas"]["BoundingBoxOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["CogView4ConditioningOutput"] | components["schemas"]["CogView4ModelLoaderOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FloatGeneratorOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["FluxConditioningCollectionOutput"] | components["schemas"]["FluxConditioningOutput"] | components["schemas"]["FluxControlLoRALoaderOutput"] | components["schemas"]["FluxControlNetOutput"] | components["schemas"]["FluxFillOutput"] | components["schemas"]["FluxKontextOutput"] | components["schemas"]["FluxLoRALoaderOutput"] | components["schemas"]["FluxModelLoaderOutput"] | components["schemas"]["FluxReduxOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ImageGeneratorOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImagePanelCoordinateOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IntegerGeneratorOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LatentsMetaOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["LoRASelectorOutput"] | components["schemas"]["MDControlListOutput"] | components["schemas"]["MDIPAdapterListOutput"] | components["schemas"]["MDT2IAdapterListOutput"] | 
components["schemas"]["MaskOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["MetadataToLorasCollectionOutput"] | components["schemas"]["MetadataToModelOutput"] | components["schemas"]["MetadataToSDXLModelOutput"] | components["schemas"]["ModelIdentifierOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["SD3ConditioningOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["Sd3ModelLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["String2Output"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["StringGeneratorOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["VideoOutput"]; + }; /** - * Width - * @description The ideal width of the image (in pixels) + * Errors + * @description Errors raised when executing nodes */ - width: number; + errors: { + [key: string]: string; + }; /** - * Height - * @description The ideal height of the image (in pixels) + * Prepared Source Mapping + * @description The map of prepared nodes to original graph nodes */ - height: number; + prepared_source_mapping: { + [key: string]: string; + }; /** - * type - * @default ideal_size_output - * @constant + * Source Prepared Mapping + * @description The map of original graph nodes to prepared nodes */ - type: "ideal_size_output"; + source_prepared_mapping: { + [key: string]: string[]; + }; }; /** - * Image Batch - * @description Create a batched generation, where the workflow is executed once for each image in the batch. + * Grounding DINO (Text Prompt Object Detection) + * @description Runs a Grounding DINO model. Performs zero-shot bounding-box object detection from a text prompt. */ - ImageBatchInvocation: { + GroundingDinoInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -10079,30 +9690,40 @@ export type components = { */ use_cache?: boolean; /** - * Batch Group - * @description The ID of this batch node's group. If provided, all batch nodes in with the same ID will be 'zipped' before execution, and all nodes' collections must be of the same size. - * @default None - * @enum {string} + * Model + * @description The Grounding DINO model to use. + * @default null */ - batch_group_id?: "None" | "Group 1" | "Group 2" | "Group 3" | "Group 4" | "Group 5"; + model?: ("grounding-dino-tiny" | "grounding-dino-base") | null; /** - * Images - * @description The images to batch over + * Prompt + * @description The prompt describing the object to segment. * @default null */ - images?: components["schemas"]["ImageField"][] | null; + prompt?: string | null; + /** + * @description The image to segment. + * @default null + */ + image?: components["schemas"]["ImageField"] | null; + /** + * Detection Threshold + * @description The detection threshold for the Grounding DINO model. All detected bounding boxes with scores above this threshold will be returned. 
+ * @default 0.3 + */ + detection_threshold?: number; /** * type - * @default image_batch + * @default grounding_dino * @constant */ - type: "image_batch"; + type: "grounding_dino"; }; /** - * Blur Image - * @description Blurs an image + * HED Edge Detection + * @description Generates an edge map using the HED (softedge) model. */ - ImageBlurInvocation: { + HEDEdgeDetectionInvocation: { /** * @description The board to save the image to * @default null @@ -10131,57 +9752,59 @@ */ use_cache?: boolean; /** - * @description The image to blur + * @description The image to process * @default null */ image?: components["schemas"]["ImageField"] | null; /** - * Radius - * @description The blur radius - * @default 8 - */ - radius?: number; - /** - * Blur Type - * @description The type of blur - * @default gaussian - * @enum {string} + * Scribble + * @description Whether or not to use scribble mode + * @default false */ - blur_type?: "gaussian" | "box"; + scribble?: boolean; /** * type - * @default img_blur + * @default hed_edge_detection * @constant */ - type: "img_blur"; + type: "hed_edge_detection"; }; /** - * ImageCategory - * @description The category of an image. - * - * - GENERAL: The image is an output, init image, or otherwise an image without a specialized purpose. - * - MASK: The image is a mask image. - * - CONTROL: The image is a ControlNet control image. - * - USER: The image is a user-provide image. - * - OTHER: The image is some other type of image with a specialized purpose. To be used by external nodes. + * HFModelSource + * @description A HuggingFace repo_id with optional variant, sub-folder and access token. + * Note that the variant option, if not provided to the constructor, will default to fp16, which is + * what people (almost) always want. + */ + HFModelSource: { + /** Repo Id */ + repo_id: string; + /** @default fp16 */ + variant?: components["schemas"]["ModelRepoVariant"] | null; + /** Subfolder */ + subfolder?: string | null; + /** Access Token */ + access_token?: string | null; + /** + * @description discriminator enum property added by openapi-typescript + * @enum {string} + */ + type: "hf"; + }; + /** + * HFTokenStatus * @enum {string} */ - ImageCategory: "general" | "mask" | "control" | "user" | "other"; + HFTokenStatus: "valid" | "invalid" | "unknown"; + /** HTTPValidationError */ + HTTPValidationError: { + /** Detail */ + detail?: components["schemas"]["ValidationError"][]; + }; /** - * Extract Image Channel - * @description Gets a channel from an image. + * Heuristic Resize + * @description Resize an image using a heuristic method. Preserves edge maps. */ - ImageChannelInvocation: { - /** - * @description The board to save the image to - * @default null - */ - board?: components["schemas"]["BoardField"] | null; - /** - * @description Optional metadata to be saved with the image - * @default null - */ - metadata?: components["schemas"]["MetadataField"] | null; + HeuristicResizeInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -10200,101 +9823,136 @@ export type components = { */ use_cache?: boolean; /** - * @description The image to get the channel from + * @description The image to resize * @default null */ image?: components["schemas"]["ImageField"] | null; /** - * Channel - * @description The channel to get - * @default A - * @enum {string} + * Width + * @description The width to resize to (px) + * @default 512 */ - channel?: "A" | "R" | "G" | "B"; + width?: number; + /** + * Height + * @description The height to resize to (px) + * @default 512 + */ + height?: number; /** * type - * @default img_chan + * @default heuristic_resize * @constant */ - type: "img_chan"; + type: "heuristic_resize"; }; /** - * Multiply Image Channel - * @description Scale a specific color channel of an image. + * HuggingFaceMetadata + * @description Extended metadata fields provided by HuggingFace. */ - ImageChannelMultiplyInvocation: { + HuggingFaceMetadata: { /** - * @description The board to save the image to - * @default null + * Name + * @description model's name */ - board?: components["schemas"]["BoardField"] | null; + name: string; /** - * @description Optional metadata to be saved with the image - * @default null + * Files + * @description model files and their sizes */ - metadata?: components["schemas"]["MetadataField"] | null; + files?: components["schemas"]["RemoteModelFile"][]; + /** + * @description discriminator enum property added by openapi-typescript + * @enum {string} + */ + type: "huggingface"; /** * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * @description The HF model id */ id: string; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. + * Api Response + * @description Response from the HF API as stringified JSON + */ + api_response?: string | null; + /** + * Is Diffusers + * @description Whether the metadata is for a Diffusers format model * @default false */ - is_intermediate?: boolean; + is_diffusers?: boolean; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Ckpt Urls + * @description URLs for all checkpoint format models in the metadata */ - use_cache?: boolean; + ckpt_urls?: string[] | null; + }; + /** HuggingFaceModels */ + HuggingFaceModels: { /** - * @description The image to adjust - * @default null + * Urls + * @description URLs for all checkpoint format models in the metadata */ - image?: components["schemas"]["ImageField"] | null; + urls: string[] | null; /** - * Channel - * @description Which channel to adjust - * @default null + * Is Diffusers + * @description Whether the metadata is for a Diffusers format model */ - channel?: ("Red (RGBA)" | "Green (RGBA)" | "Blue (RGBA)" | "Alpha (RGBA)" | "Cyan (CMYK)" | "Magenta (CMYK)" | "Yellow (CMYK)" | "Black (CMYK)" | "Hue (HSV)" | "Saturation (HSV)" | "Value (HSV)" | "Luminosity (LAB)" | "A (LAB)" | "B (LAB)" | "Y (YCbCr)" | "Cb (YCbCr)" | "Cr (YCbCr)") | null; + is_diffusers: boolean; + }; + /** IPAdapterField */ + IPAdapterField: { /** - * Scale - * @description The amount to scale the channel by. + * Image + * @description The IP-Adapter image prompt(s). + */ + image: components["schemas"]["ImageField"] | components["schemas"]["ImageField"][]; + /** @description The IP-Adapter model to use. */ + ip_adapter_model: components["schemas"]["ModelIdentifierField"]; + /** @description The name of the CLIP image encoder model. 
*/ + image_encoder_model: components["schemas"]["ModelIdentifierField"]; + /** + * Weight + * @description The weight given to the IP-Adapter. * @default 1 */ - scale?: number; + weight?: number | number[]; /** - * Invert Channel - * @description Invert the channel after scaling - * @default false + * Target Blocks + * @description The IP Adapter blocks to apply + * @default [] */ - invert_channel?: boolean; + target_blocks?: string[]; /** - * type - * @default img_channel_multiply - * @constant + * Method + * @description Weight apply method + * @default full */ - type: "img_channel_multiply"; - }; - /** - * Offset Image Channel - * @description Add or subtract a value from a specific color channel of an image. - */ - ImageChannelOffsetInvocation: { + method?: string; /** - * @description The board to save the image to - * @default null + * Begin Step Percent + * @description When the IP-Adapter is first applied (% of total steps) + * @default 0 */ - board?: components["schemas"]["BoardField"] | null; + begin_step_percent?: number; /** - * @description Optional metadata to be saved with the image + * End Step Percent + * @description When the IP-Adapter is last applied (% of total steps) + * @default 1 + */ + end_step_percent?: number; + /** + * @description The bool mask associated with this IP-Adapter. Excluded regions should be set to False, included regions should be set to True. * @default null */ - metadata?: components["schemas"]["MetadataField"] | null; + mask?: components["schemas"]["TensorField"] | null; + }; + /** + * IP-Adapter - SD1.5, SDXL + * @description Collects IP-Adapter info to pass to other nodes. + */ + IPAdapterInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -10313,655 +9971,634 @@ export type components = { */ use_cache?: boolean; /** - * @description The image to adjust + * Image + * @description The IP-Adapter image prompt(s). * @default null */ - image?: components["schemas"]["ImageField"] | null; + image?: components["schemas"]["ImageField"] | components["schemas"]["ImageField"][] | null; /** - * Channel - * @description Which channel to adjust + * IP-Adapter Model + * @description The IP-Adapter model. * @default null */ - channel?: ("Red (RGBA)" | "Green (RGBA)" | "Blue (RGBA)" | "Alpha (RGBA)" | "Cyan (CMYK)" | "Magenta (CMYK)" | "Yellow (CMYK)" | "Black (CMYK)" | "Hue (HSV)" | "Saturation (HSV)" | "Value (HSV)" | "Luminosity (LAB)" | "A (LAB)" | "B (LAB)" | "Y (YCbCr)" | "Cb (YCbCr)" | "Cr (YCbCr)") | null; + ip_adapter_model?: components["schemas"]["ModelIdentifierField"] | null; /** - * Offset - * @description The amount to adjust the channel by - * @default 0 + * Clip Vision Model + * @description CLIP Vision model to use. Overrides model settings. Mandatory for checkpoint models. + * @default ViT-H + * @enum {string} */ - offset?: number; + clip_vision_model?: "ViT-H" | "ViT-G" | "ViT-L"; /** - * type - * @default img_channel_offset - * @constant + * Weight + * @description The weight given to the IP-Adapter + * @default 1 */ - type: "img_channel_offset"; - }; - /** - * Image Collection Primitive - * @description A collection of image primitive values - */ - ImageCollectionInvocation: { + weight?: number | number[]; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
+ * Method + * @description The method to apply the IP-Adapter + * @default full + * @enum {string} */ - id: string; + method?: "full" | "style" | "composition" | "style_strong" | "style_precise"; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Begin Step Percent + * @description When the IP-Adapter is first applied (% of total steps) + * @default 0 */ - is_intermediate?: boolean; + begin_step_percent?: number; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * End Step Percent + * @description When the IP-Adapter is last applied (% of total steps) + * @default 1 */ - use_cache?: boolean; + end_step_percent?: number; /** - * Collection - * @description The collection of image values + * @description A mask defining the region that this IP-Adapter applies to. * @default null */ - collection?: components["schemas"]["ImageField"][] | null; - /** - * type - * @default image_collection - * @constant - */ - type: "image_collection"; - }; - /** - * ImageCollectionOutput - * @description Base class for nodes that output a collection of images - */ - ImageCollectionOutput: { - /** - * Collection - * @description The output images - */ - collection: components["schemas"]["ImageField"][]; + mask?: components["schemas"]["TensorField"] | null; /** * type - * @default image_collection_output + * @default ip_adapter * @constant */ - type: "image_collection_output"; + type: "ip_adapter"; }; /** - * Convert Image Mode - * @description Converts an image to a different mode. + * IPAdapterMetadataField + * @description IP Adapter Field, minus the CLIP Vision Encoder model */ - ImageConvertInvocation: { - /** - * @description The board to save the image to - * @default null - */ - board?: components["schemas"]["BoardField"] | null; + IPAdapterMetadataField: { + /** @description The IP-Adapter image prompt. */ + image: components["schemas"]["ImageField"]; + /** @description The IP-Adapter model. */ + ip_adapter_model: components["schemas"]["ModelIdentifierField"]; /** - * @description Optional metadata to be saved with the image - * @default null + * Clip Vision Model + * @description The CLIP Vision model + * @enum {string} */ - metadata?: components["schemas"]["MetadataField"] | null; + clip_vision_model: "ViT-L" | "ViT-H" | "ViT-G"; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Method + * @description Method to apply IP Weights with + * @enum {string} */ - id: string; + method: "full" | "style" | "composition" | "style_strong" | "style_precise"; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. 
- * @default false + * Weight + * @description The weight given to the IP-Adapter */ - is_intermediate?: boolean; + weight: number | number[]; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Begin Step Percent + * @description When the IP-Adapter is first applied (% of total steps) */ - use_cache?: boolean; + begin_step_percent: number; /** - * @description The image to convert - * @default null + * End Step Percent + * @description When the IP-Adapter is last applied (% of total steps) */ - image?: components["schemas"]["ImageField"] | null; + end_step_percent: number; + }; + /** IPAdapterOutput */ + IPAdapterOutput: { /** - * Mode - * @description The mode to convert to - * @default L - * @enum {string} + * IP-Adapter + * @description IP-Adapter to apply */ - mode?: "L" | "RGB" | "RGBA" | "CMYK" | "YCbCr" | "LAB" | "HSV" | "I" | "F"; + ip_adapter: components["schemas"]["IPAdapterField"]; /** * type - * @default img_conv + * @default ip_adapter_output * @constant */ - type: "img_conv"; + type: "ip_adapter_output"; }; - /** - * Crop Image - * @description Crops an image to a specified box. The box can be outside of the image. - */ - ImageCropInvocation: { + /** IPAdapter_Checkpoint_FLUX_Config */ + IPAdapter_Checkpoint_FLUX_Config: { /** - * @description The board to save the image to - * @default null + * Key + * @description A unique key for this model. */ - board?: components["schemas"]["BoardField"] | null; + key: string; /** - * @description Optional metadata to be saved with the image - * @default null + * Hash + * @description The hash of the model file(s). */ - metadata?: components["schemas"]["MetadataField"] | null; + hash: string; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. */ - id: string; + path: string; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * File Size + * @description The size of the model in bytes. */ - is_intermediate?: boolean; + file_size: number; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Name + * @description Name of the model. */ - use_cache?: boolean; + name: string; /** - * @description The image to crop - * @default null + * Description + * @description Model description */ - image?: components["schemas"]["ImageField"] | null; + description: string | null; /** - * X - * @description The left x coordinate of the crop rectangle - * @default 0 + * Source + * @description The original source of the model (path, URL or repo_id). */ - x?: number; + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; /** - * Y - * @description The top y coordinate of the crop rectangle - * @default 0 + * Source Api Response + * @description The original API response from the source, as stringified JSON. 
*/ - y?: number; + source_api_response: string | null; /** - * Width - * @description The width of the crop rectangle - * @default 512 + * Cover Image + * @description Url for image to preview model */ - width?: number; + cover_image: string | null; /** - * Height - * @description The height of the crop rectangle - * @default 512 + * Usage Info + * @description Usage information for this model */ - height?: number; + usage_info: string | null; /** - * type - * @default img_crop + * Type + * @default ip_adapter * @constant */ - type: "img_crop"; - }; - /** - * ImageDTO - * @description Deserialized image record, enriched for the frontend. - */ - ImageDTO: { + type: "ip_adapter"; /** - * Image Name - * @description The unique name of the image. + * Format + * @default checkpoint + * @constant */ - image_name: string; + format: "checkpoint"; /** - * Image Url - * @description The URL of the image. + * Base + * @default flux + * @constant */ - image_url: string; + base: "flux"; + }; + /** IPAdapter_Checkpoint_SD1_Config */ + IPAdapter_Checkpoint_SD1_Config: { /** - * Thumbnail Url - * @description The URL of the image's thumbnail. + * Key + * @description A unique key for this model. */ - thumbnail_url: string; - /** @description The type of the image. */ - image_origin: components["schemas"]["ResourceOrigin"]; - /** @description The category of the image. */ - image_category: components["schemas"]["ImageCategory"]; + key: string; /** - * Width - * @description The width of the image in px. + * Hash + * @description The hash of the model file(s). */ - width: number; + hash: string; /** - * Height - * @description The height of the image in px. + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. */ - height: number; + path: string; /** - * Created At - * @description The created timestamp of the image. + * File Size + * @description The size of the model in bytes. */ - created_at: string; + file_size: number; /** - * Updated At - * @description The updated timestamp of the image. + * Name + * @description Name of the model. */ - updated_at: string; + name: string; /** - * Deleted At - * @description The deleted timestamp of the image. + * Description + * @description Model description */ - deleted_at?: string | null; + description: string | null; /** - * Is Intermediate - * @description Whether this is an intermediate image. + * Source + * @description The original source of the model (path, URL or repo_id). */ - is_intermediate: boolean; + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; /** - * Session Id - * @description The session ID that generated this image, if it is a generated image. + * Source Api Response + * @description The original API response from the source, as stringified JSON. */ - session_id?: string | null; + source_api_response: string | null; /** - * Node Id - * @description The node ID that generated this image, if it is a generated image. + * Cover Image + * @description Url for image to preview model */ - node_id?: string | null; + cover_image: string | null; /** - * Starred - * @description Whether this image is starred. + * Usage Info + * @description Usage information for this model */ - starred: boolean; + usage_info: string | null; /** - * Has Workflow - * @description Whether this image has a workflow. 
+ * Type + * @default ip_adapter + * @constant */ - has_workflow: boolean; + type: "ip_adapter"; /** - * Board Id - * @description The id of the board the image belongs to, if one exists. + * Format + * @default checkpoint + * @constant */ - board_id?: string | null; - }; - /** - * ImageField - * @description An image primitive field - */ - ImageField: { + format: "checkpoint"; /** - * Image Name - * @description The name of the image + * Base + * @default sd-1 + * @constant */ - image_name: string; + base: "sd-1"; }; - /** - * Image Generator - * @description Generated a collection of images for use in a batched generation - */ - ImageGenerator: { - /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. - */ - id: string; - /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false - */ - is_intermediate?: boolean; + /** IPAdapter_Checkpoint_SD2_Config */ + IPAdapter_Checkpoint_SD2_Config: { /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Key + * @description A unique key for this model. */ - use_cache?: boolean; + key: string; /** - * Generator Type - * @description The image generator. + * Hash + * @description The hash of the model file(s). */ - generator: components["schemas"]["ImageGeneratorField"]; + hash: string; /** - * type - * @default image_generator - * @constant + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. */ - type: "image_generator"; - }; - /** ImageGeneratorField */ - ImageGeneratorField: Record; - /** - * ImageGeneratorOutput - * @description Base class for nodes that output a collection of boards - */ - ImageGeneratorOutput: { + path: string; /** - * Images - * @description The generated images + * File Size + * @description The size of the model in bytes. */ - images: components["schemas"]["ImageField"][]; + file_size: number; /** - * type - * @default image_generator_output - * @constant + * Name + * @description Name of the model. */ - type: "image_generator_output"; - }; - /** - * Adjust Image Hue - * @description Adjusts the Hue of an image. - */ - ImageHueAdjustmentInvocation: { + name: string; /** - * @description The board to save the image to - * @default null + * Description + * @description Model description */ - board?: components["schemas"]["BoardField"] | null; + description: string | null; /** - * @description Optional metadata to be saved with the image - * @default null + * Source + * @description The original source of the model (path, URL or repo_id). */ - metadata?: components["schemas"]["MetadataField"] | null; + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Source Api Response + * @description The original API response from the source, as stringified JSON. */ - id: string; + source_api_response: string | null; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. 
- * @default false + * Cover Image + * @description Url for image to preview model */ - is_intermediate?: boolean; + cover_image: string | null; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Usage Info + * @description Usage information for this model */ - use_cache?: boolean; + usage_info: string | null; /** - * @description The image to adjust - * @default null + * Type + * @default ip_adapter + * @constant */ - image?: components["schemas"]["ImageField"] | null; + type: "ip_adapter"; /** - * Hue - * @description The degrees by which to rotate the hue, 0-360 - * @default 0 + * Format + * @default checkpoint + * @constant */ - hue?: number; + format: "checkpoint"; /** - * type - * @default img_hue_adjust + * Base + * @default sd-2 * @constant */ - type: "img_hue_adjust"; + base: "sd-2"; }; - /** - * Inverse Lerp Image - * @description Inverse linear interpolation of all pixels of an image - */ - ImageInverseLerpInvocation: { - /** - * @description The board to save the image to - * @default null - */ - board?: components["schemas"]["BoardField"] | null; + /** IPAdapter_Checkpoint_SDXL_Config */ + IPAdapter_Checkpoint_SDXL_Config: { /** - * @description Optional metadata to be saved with the image - * @default null + * Key + * @description A unique key for this model. */ - metadata?: components["schemas"]["MetadataField"] | null; + key: string; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Hash + * @description The hash of the model file(s). */ - id: string; + hash: string; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. */ - is_intermediate?: boolean; + path: string; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * File Size + * @description The size of the model in bytes. */ - use_cache?: boolean; + file_size: number; /** - * @description The image to lerp - * @default null + * Name + * @description Name of the model. */ - image?: components["schemas"]["ImageField"] | null; + name: string; /** - * Min - * @description The minimum input value - * @default 0 + * Description + * @description Model description */ - min?: number; + description: string | null; /** - * Max - * @description The maximum input value - * @default 255 + * Source + * @description The original source of the model (path, URL or repo_id). */ - max?: number; + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; /** - * type - * @default img_ilerp - * @constant + * Source Api Response + * @description The original API response from the source, as stringified JSON. */ - type: "img_ilerp"; - }; - /** - * Image Primitive - * @description An image primitive value - */ - ImageInvocation: { + source_api_response: string | null; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Cover Image + * @description Url for image to preview model */ - id: string; + cover_image: string | null; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. 
- * @default false + * Usage Info + * @description Usage information for this model */ - is_intermediate?: boolean; + usage_info: string | null; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Type + * @default ip_adapter + * @constant */ - use_cache?: boolean; + type: "ip_adapter"; /** - * @description The image to load - * @default null + * Format + * @default checkpoint + * @constant */ - image?: components["schemas"]["ImageField"] | null; + format: "checkpoint"; /** - * type - * @default image + * Base + * @default sdxl * @constant */ - type: "image"; + base: "sdxl"; }; - /** - * Lerp Image - * @description Linear interpolation of all pixels of an image - */ - ImageLerpInvocation: { + /** IPAdapter_InvokeAI_SD1_Config */ + IPAdapter_InvokeAI_SD1_Config: { /** - * @description The board to save the image to - * @default null + * Key + * @description A unique key for this model. */ - board?: components["schemas"]["BoardField"] | null; + key: string; /** - * @description Optional metadata to be saved with the image - * @default null + * Hash + * @description The hash of the model file(s). */ - metadata?: components["schemas"]["MetadataField"] | null; + hash: string; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. */ - id: string; + path: string; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * File Size + * @description The size of the model in bytes. */ - is_intermediate?: boolean; + file_size: number; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Name + * @description Name of the model. */ - use_cache?: boolean; + name: string; /** - * @description The image to lerp - * @default null + * Description + * @description Model description */ - image?: components["schemas"]["ImageField"] | null; + description: string | null; /** - * Min - * @description The minimum output value - * @default 0 + * Source + * @description The original source of the model (path, URL or repo_id). */ - min?: number; + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; /** - * Max - * @description The maximum output value - * @default 255 + * Source Api Response + * @description The original API response from the source, as stringified JSON. */ - max?: number; + source_api_response: string | null; /** - * type - * @default img_lerp + * Cover Image + * @description Url for image to preview model + */ + cover_image: string | null; + /** + * Usage Info + * @description Usage information for this model + */ + usage_info: string | null; + /** + * Type + * @default ip_adapter * @constant */ - type: "img_lerp"; + type: "ip_adapter"; + /** + * Format + * @default invokeai + * @constant + */ + format: "invokeai"; + /** Image Encoder Model Id */ + image_encoder_model_id: string; + /** + * Base + * @default sd-1 + * @constant + */ + base: "sd-1"; }; - /** - * Image Mask to Tensor - * @description Convert a mask image to a tensor. Converts the image to grayscale and uses thresholding at the specified value. 
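For reference, the checkpoint and invokeai IP adapter variants above differ only in their literal `format` and `base` fields plus, for the invokeai format, the extra `image_encoder_model_id`, so a client can narrow them without any runtime metadata. A minimal TypeScript sketch; the `AnyIPAdapterConfig` alias and `imageEncoderFor` helper are illustrative, not part of the generated schema:

type AnyIPAdapterConfig =
  | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"]
  | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"]
  | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"]
  | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"];

// `format` is a literal discriminant: "invokeai" narrows to the variant that
// carries an image encoder reference, "checkpoint" to the single-file variants.
function imageEncoderFor(config: AnyIPAdapterConfig): string | null {
  return config.format === "invokeai" ? config.image_encoder_model_id : null;
}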
- */ - ImageMaskToTensorInvocation: { + /** IPAdapter_InvokeAI_SD2_Config */ + IPAdapter_InvokeAI_SD2_Config: { /** - * @description Optional metadata to be saved with the image - * @default null + * Key + * @description A unique key for this model. */ - metadata?: components["schemas"]["MetadataField"] | null; + key: string; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Hash + * @description The hash of the model file(s). */ - id: string; + hash: string; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. */ - is_intermediate?: boolean; + path: string; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * File Size + * @description The size of the model in bytes. */ - use_cache?: boolean; + file_size: number; /** - * @description The mask image to convert. - * @default null + * Name + * @description Name of the model. */ - image?: components["schemas"]["ImageField"] | null; + name: string; /** - * Cutoff - * @description Cutoff (<) - * @default 128 + * Description + * @description Model description */ - cutoff?: number; + description: string | null; /** - * Invert - * @description Whether to invert the mask. - * @default false + * Source + * @description The original source of the model (path, URL or repo_id). */ - invert?: boolean; + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; /** - * type - * @default image_mask_to_tensor + * Source Api Response + * @description The original API response from the source, as stringified JSON. + */ + source_api_response: string | null; + /** + * Cover Image + * @description Url for image to preview model + */ + cover_image: string | null; + /** + * Usage Info + * @description Usage information for this model + */ + usage_info: string | null; + /** + * Type + * @default ip_adapter * @constant */ - type: "image_mask_to_tensor"; + type: "ip_adapter"; + /** + * Format + * @default invokeai + * @constant + */ + format: "invokeai"; + /** Image Encoder Model Id */ + image_encoder_model_id: string; + /** + * Base + * @default sd-2 + * @constant + */ + base: "sd-2"; }; - /** - * Multiply Images - * @description Multiplies two images together using `PIL.ImageChops.multiply()`. - */ - ImageMultiplyInvocation: { + /** IPAdapter_InvokeAI_SDXL_Config */ + IPAdapter_InvokeAI_SDXL_Config: { /** - * @description The board to save the image to - * @default null + * Key + * @description A unique key for this model. */ - board?: components["schemas"]["BoardField"] | null; + key: string; /** - * @description Optional metadata to be saved with the image - * @default null + * Hash + * @description The hash of the model file(s). */ - metadata?: components["schemas"]["MetadataField"] | null; + hash: string; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. */ - id: string; + path: string; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * File Size + * @description The size of the model in bytes. 
*/ - is_intermediate?: boolean; + file_size: number; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Name + * @description Name of the model. */ - use_cache?: boolean; + name: string; /** - * @description The first image to multiply - * @default null + * Description + * @description Model description */ - image1?: components["schemas"]["ImageField"] | null; + description: string | null; /** - * @description The second image to multiply - * @default null + * Source + * @description The original source of the model (path, URL or repo_id). */ - image2?: components["schemas"]["ImageField"] | null; + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; /** - * type - * @default img_mul + * Source Api Response + * @description The original API response from the source, as stringified JSON. + */ + source_api_response: string | null; + /** + * Cover Image + * @description Url for image to preview model + */ + cover_image: string | null; + /** + * Usage Info + * @description Usage information for this model + */ + usage_info: string | null; + /** + * Type + * @default ip_adapter * @constant */ - type: "img_mul"; - }; - /** - * Blur NSFW Image - * @description Add blur to NSFW-flagged images - */ - ImageNSFWBlurInvocation: { + type: "ip_adapter"; /** - * @description The board to save the image to - * @default null + * Format + * @default invokeai + * @constant */ - board?: components["schemas"]["BoardField"] | null; + format: "invokeai"; + /** Image Encoder Model Id */ + image_encoder_model_id: string; /** - * @description Optional metadata to be saved with the image - * @default null + * Base + * @default sdxl + * @constant */ - metadata?: components["schemas"]["MetadataField"] | null; + base: "sdxl"; + }; + /** + * Ideal Size - SD1.5, SDXL + * @description Calculates the ideal size for generation to avoid duplication + */ + IdealSizeInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -10980,43 +10617,104 @@ export type components = { */ use_cache?: boolean; /** - * @description The image to check + * Width + * @description Final image width + * @default 1024 + */ + width?: number; + /** + * Height + * @description Final image height + * @default 576 + */ + height?: number; + /** + * @description UNet (scheduler, LoRAs) * @default null */ - image?: components["schemas"]["ImageField"] | null; + unet?: components["schemas"]["UNetField"] | null; + /** + * Multiplier + * @description Amount to multiply the model's dimensions by when calculating the ideal size (may result in initial generation artifacts if too large) + * @default 1 + */ + multiplier?: number; /** * type - * @default img_nsfw + * @default ideal_size * @constant */ - type: "img_nsfw"; + type: "ideal_size"; }; /** - * ImageNamesResult - * @description Response containing ordered image names with metadata for optimistic updates. 
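As a usage sketch for the ideal-size node above: the client supplies the final target dimensions and receives the size the model should natively generate at. The node id and values here are placeholders:

type IdealSizeNode = components["schemas"]["IdealSizeInvocation"];

// Native generation size behind a 1536x1536 final image; a multiplier above 1
// enlarges the initial generation at the risk of artifacts.
const idealSize: IdealSizeNode = {
  id: "ideal_size_1", // must be unique within the graph
  type: "ideal_size",
  width: 1536,
  height: 1536,
  multiplier: 1,
};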
+ * IdealSizeOutput + * @description The ideal width and height computed for generation */ - ImageNamesResult: { + IdealSizeOutput: { /** - * Image Names - * @description Ordered list of image names + * Width + * @description The ideal width of the image (in pixels) */ - image_names: string[]; + width: number; /** - * Starred Count - * @description Number of starred images (when starred_first=True) + * Height + * @description The ideal height of the image (in pixels) */ - starred_count: number; + height: number; /** - * Total Count - * @description Total number of images matching the query + * type + * @default ideal_size_output + * @constant */ - total_count: number; + type: "ideal_size_output"; }; /** - * Add Image Noise - * @description Add noise to an image + * Image Batch + * @description Create a batched generation, where the workflow is executed once for each image in the batch. */ - ImageNoiseInvocation: { + ImageBatchInvocation: { + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * Batch Group + * @description The ID of this batch node's group. If provided, all batch nodes with the same ID will be 'zipped' before execution, and all nodes' collections must be of the same size. + * @default None + * @enum {string} + */ + batch_group_id?: "None" | "Group 1" | "Group 2" | "Group 3" | "Group 4" | "Group 5"; + /** + * Images + * @description The images to batch over + * @default null + */ + images?: components["schemas"]["ImageField"][] | null; + /** + * type + * @default image_batch + * @constant + */ + type: "image_batch"; + }; + /** + * Blur Image + * @description Blurs an image + */ + ImageBlurInvocation: { /** * @description The board to save the image to * @default null @@ -11045,112 +10743,108 @@ */ use_cache?: boolean; /** - * @description The image to add noise to + * @description The image to blur * @default null */ image?: components["schemas"]["ImageField"] | null; /** - * @description Optional mask determining where to apply noise (black=noise, white=no noise) - * @default null - */ - mask?: components["schemas"]["ImageField"] | null; - /** - * Seed - * @description Seed for random number generation - * @default 0 + * Radius + * @description The blur radius + * @default 8 */ - seed?: number; + radius?: number; /** - * Noise Type - * @description The type of noise to add + * Blur Type + * @description The type of blur * @default gaussian * @enum {string} */ - noise_type?: "gaussian" | "salt_and_pepper"; - /** - * Amount - * @description The amount of noise to add - * @default 0.1 - */ - amount?: number; - /** - * Noise Color - * @description Whether to add colored noise - * @default true - */ - noise_color?: boolean; - /** - * Size - * @description The size of the noise points - * @default 1 - */ - size?: number; + blur_type?: "gaussian" | "box"; /** * type - * @default img_noise + * @default img_blur * @constant */ - type: "img_noise"; + type: "img_blur"; }; /** - * ImageOutput - * @description Base class for nodes that output a single image + * ImageCategory + * @description The category of an image.
+ * + * - GENERAL: The image is an output, init image, or otherwise an image without a specialized purpose. + * - MASK: The image is a mask image. + * - CONTROL: The image is a ControlNet control image. + * - USER: The image is a user-provided image. + * - OTHER: The image is some other type of image with a specialized purpose. To be used by external nodes. + * @enum {string} */ - ImageOutput: { - /** @description The output image */ - image: components["schemas"]["ImageField"]; + ImageCategory: "general" | "mask" | "control" | "user" | "other"; + /** + * Extract Image Channel + * @description Gets a channel from an image. + */ + ImageChannelInvocation: { /** - * Width - * @description The width of the image in pixels + * @description The board to save the image to + * @default null */ - width: number; + board?: components["schemas"]["BoardField"] | null; /** - * Height - * @description The height of the image in pixels + * @description Optional metadata to be saved with the image + * @default null */ - height: number; + metadata?: components["schemas"]["MetadataField"] | null; /** - * type - * @default image_output - * @constant + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - type: "image_output"; - }; - /** ImagePanelCoordinateOutput */ - ImagePanelCoordinateOutput: { + id: string; /** - * X Left - * @description The left x-coordinate of the panel. + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - x_left: number; + is_intermediate?: boolean; /** - * Y Top - * @description The top y-coordinate of the panel. + * Use Cache + * @description Whether or not to use the cache + * @default true */ - y_top: number; + use_cache?: boolean; /** - * Width - * @description The width of the panel. + * @description The image to get the channel from + * @default null */ - width: number; + image?: components["schemas"]["ImageField"] | null; /** - * Height - * @description The height of the panel. + * Channel + * @description The channel to get + * @default A + * @enum {string} */ - height: number; + channel?: "A" | "R" | "G" | "B"; /** * type - * @default image_panel_coordinate_output + * @default img_chan * @constant */ - type: "image_panel_coordinate_output"; + type: "img_chan"; }; /** - * Image Panel Layout - * @description Get the coordinates of a single panel in a grid. (If the full image shape cannot be divided evenly into panels, - * then the grid may not cover the entire image.) + * Multiply Image Channel + * @description Scale a specific color channel of an image. */ - ImagePanelLayoutInvocation: { + ImageChannelMultiplyInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -11169,53 +10863,40 @@ */ use_cache?: boolean; /** - * Width - * @description The width of the entire grid. + * @description The image to adjust * @default null */ - width?: number | null; + image?: components["schemas"]["ImageField"] | null; /** - * Height - * @description The height of the entire grid.
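Returning to ImageBatchInvocation above: batch nodes sharing a `batch_group_id` are zipped, so the i-th execution pairs the i-th element of every collection in the group, and all collections in a group must be the same length. A sketch with placeholder ids and image names:

type ImageBatchNode = components["schemas"]["ImageBatchInvocation"];

// Two zipped batches: run i uses the image from batchA[i] together with batchB[i].
const batchA: ImageBatchNode = {
  id: "batch_a",
  type: "image_batch",
  batch_group_id: "Group 1",
  images: [{ image_name: "a-0.png" }, { image_name: "a-1.png" }],
};
const batchB: ImageBatchNode = {
  id: "batch_b",
  type: "image_batch",
  batch_group_id: "Group 1",
  images: [{ image_name: "b-0.png" }, { image_name: "b-1.png" }],
};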
+ * Channel + * @description Which channel to adjust * @default null */ - height?: number | null; - /** - * Num Cols - * @description The number of columns in the grid. - * @default 1 - */ - num_cols?: number; + channel?: ("Red (RGBA)" | "Green (RGBA)" | "Blue (RGBA)" | "Alpha (RGBA)" | "Cyan (CMYK)" | "Magenta (CMYK)" | "Yellow (CMYK)" | "Black (CMYK)" | "Hue (HSV)" | "Saturation (HSV)" | "Value (HSV)" | "Luminosity (LAB)" | "A (LAB)" | "B (LAB)" | "Y (YCbCr)" | "Cb (YCbCr)" | "Cr (YCbCr)") | null; /** - * Num Rows - * @description The number of rows in the grid. + * Scale + * @description The amount to scale the channel by. * @default 1 */ - num_rows?: number; - /** - * Panel Col Idx - * @description The column index of the panel to be processed. - * @default 0 - */ - panel_col_idx?: number; + scale?: number; /** - * Panel Row Idx - * @description The row index of the panel to be processed. - * @default 0 + * Invert Channel + * @description Invert the channel after scaling + * @default false */ - panel_row_idx?: number; + invert_channel?: boolean; /** * type - * @default image_panel_layout + * @default img_channel_multiply * @constant */ - type: "image_panel_layout"; + type: "img_channel_multiply"; }; /** - * Paste Image - * @description Pastes an image into another image. + * Offset Image Channel + * @description Add or subtract a value from a specific color channel of an image. */ - ImagePasteInvocation: { + ImageChannelOffsetInvocation: { /** * @description The board to save the image to * @default null @@ -11244,91 +10925,34 @@ export type components = { */ use_cache?: boolean; /** - * @description The base image - * @default null - */ - base_image?: components["schemas"]["ImageField"] | null; - /** - * @description The image to paste + * @description The image to adjust * @default null */ image?: components["schemas"]["ImageField"] | null; /** - * @description The mask to use when pasting + * Channel + * @description Which channel to adjust * @default null */ - mask?: components["schemas"]["ImageField"] | null; - /** - * X - * @description The left x coordinate at which to paste the image - * @default 0 - */ - x?: number; + channel?: ("Red (RGBA)" | "Green (RGBA)" | "Blue (RGBA)" | "Alpha (RGBA)" | "Cyan (CMYK)" | "Magenta (CMYK)" | "Yellow (CMYK)" | "Black (CMYK)" | "Hue (HSV)" | "Saturation (HSV)" | "Value (HSV)" | "Luminosity (LAB)" | "A (LAB)" | "B (LAB)" | "Y (YCbCr)" | "Cb (YCbCr)" | "Cr (YCbCr)") | null; /** - * Y - * @description The top y coordinate at which to paste the image + * Offset + * @description The amount to adjust the channel by * @default 0 */ - y?: number; - /** - * Crop - * @description Crop to base image dimensions - * @default false - */ - crop?: boolean; + offset?: number; /** * type - * @default img_paste + * @default img_channel_offset * @constant */ - type: "img_paste"; - }; - /** - * ImageRecordChanges - * @description A set of changes to apply to an image record. - * - * Only limited changes are valid: - * - `image_category`: change the category of an image - * - `session_id`: change the session associated with an image - * - `is_intermediate`: change the image's `is_intermediate` flag - * - `starred`: change whether the image is starred - */ - ImageRecordChanges: { - /** @description The image's new category. */ - image_category?: components["schemas"]["ImageCategory"] | null; - /** - * Session Id - * @description The image's new session ID. 
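The channel enum above spans several color spaces, so the same two nodes cover per-channel edits in RGBA, CMYK, HSV, LAB, and YCbCr. A sketch of an offset node that brightens via the HSV value channel; the id and image name are placeholders:

type ChannelOffsetNode = components["schemas"]["ImageChannelOffsetInvocation"];

// Raise HSV Value by 20 to brighten without shifting hue or saturation.
const brighten: ChannelOffsetNode = {
  id: "brighten_v",
  type: "img_channel_offset",
  image: { image_name: "input.png" },
  channel: "Value (HSV)",
  offset: 20,
};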
- */ - session_id?: string | null; - /** - * Is Intermediate - * @description The image's new `is_intermediate` flag. - */ - is_intermediate?: boolean | null; - /** - * Starred - * @description The image's new `starred` state - */ - starred?: boolean | null; - } & { - [key: string]: unknown; + type: "img_channel_offset"; }; /** - * Resize Image - * @description Resizes an image to specific dimensions + * Image Collection Primitive + * @description A collection of image primitive values */ - ImageResizeInvocation: { - /** - * @description The board to save the image to - * @default null - */ - board?: components["schemas"]["BoardField"] | null; - /** - * @description Optional metadata to be saved with the image - * @default null - */ - metadata?: components["schemas"]["MetadataField"] | null; + ImageCollectionInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -11347,41 +10971,40 @@ export type components = { */ use_cache?: boolean; /** - * @description The image to resize + * Collection + * @description The collection of image values * @default null */ - image?: components["schemas"]["ImageField"] | null; - /** - * Width - * @description The width to resize to (px) - * @default 512 - */ - width?: number; + collection?: components["schemas"]["ImageField"][] | null; /** - * Height - * @description The height to resize to (px) - * @default 512 + * type + * @default image_collection + * @constant */ - height?: number; + type: "image_collection"; + }; + /** + * ImageCollectionOutput + * @description Base class for nodes that output a collection of images + */ + ImageCollectionOutput: { /** - * Resample Mode - * @description The resampling mode - * @default bicubic - * @enum {string} + * Collection + * @description The output images */ - resample_mode?: "nearest" | "box" | "bilinear" | "hamming" | "bicubic" | "lanczos"; + collection: components["schemas"]["ImageField"][]; /** * type - * @default img_resize + * @default image_collection_output * @constant */ - type: "img_resize"; + type: "image_collection_output"; }; /** - * Scale Image - * @description Scales an image by a factor + * Convert Image Mode + * @description Converts an image to a different mode. */ - ImageScaleInvocation: { + ImageConvertInvocation: { /** * @description The board to save the image to * @default null @@ -11410,35 +11033,39 @@ export type components = { */ use_cache?: boolean; /** - * @description The image to scale + * @description The image to convert * @default null */ image?: components["schemas"]["ImageField"] | null; /** - * Scale Factor - * @description The factor by which to scale the image - * @default 2 - */ - scale_factor?: number; - /** - * Resample Mode - * @description The resampling mode - * @default bicubic + * Mode + * @description The mode to convert to + * @default L * @enum {string} */ - resample_mode?: "nearest" | "box" | "bilinear" | "hamming" | "bicubic" | "lanczos"; + mode?: "L" | "RGB" | "RGBA" | "CMYK" | "YCbCr" | "LAB" | "HSV" | "I" | "F"; /** * type - * @default img_scale + * @default img_conv * @constant */ - type: "img_scale"; + type: "img_conv"; }; /** - * Image to Latents - SD1.5, SDXL - * @description Encodes an image into latents. + * Crop Image + * @description Crops an image to a specified box. The box can be outside of the image. 
*/ - ImageToLatentsInvocation: { + ImageCropInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -11457,55 +11084,46 @@ export type components = { */ use_cache?: boolean; /** - * @description The image to encode + * @description The image to crop * @default null */ image?: components["schemas"]["ImageField"] | null; /** - * @description VAE - * @default null + * X + * @description The left x coordinate of the crop rectangle + * @default 0 */ - vae?: components["schemas"]["VAEField"] | null; + x?: number; /** - * Tiled - * @description Processing using overlapping tiles (reduce memory consumption) - * @default false + * Y + * @description The top y coordinate of the crop rectangle + * @default 0 */ - tiled?: boolean; + y?: number; /** - * Tile Size - * @description The tile size for VAE tiling in pixels (image space). If set to 0, the default tile size for the model will be used. Larger tile sizes generally produce better results at the cost of higher memory usage. - * @default 0 + * Width + * @description The width of the crop rectangle + * @default 512 */ - tile_size?: number; + width?: number; /** - * Fp32 - * @description Whether or not to use full float32 precision - * @default false + * Height + * @description The height of the crop rectangle + * @default 512 */ - fp32?: boolean; + height?: number; /** * type - * @default i2l + * @default img_crop * @constant */ - type: "i2l"; - }; - /** ImageUploadEntry */ - ImageUploadEntry: { - /** @description The image DTO */ - image_dto: components["schemas"]["ImageDTO"]; - /** - * Presigned Url - * @description The URL to get the presigned URL for the image upload - */ - presigned_url: string; + type: "img_crop"; }; /** - * ImageUrlsDTO - * @description The URLs for an image and its thumbnail. + * ImageDTO + * @description Deserialized image record, enriched for the frontend. */ - ImageUrlsDTO: { + ImageDTO: { /** * Image Name * @description The unique name of the image. @@ -11521,85 +11139,82 @@ export type components = { * @description The URL of the image's thumbnail. */ thumbnail_url: string; - }; - /** - * Add Invisible Watermark - * @description Add an invisible watermark to an image - */ - ImageWatermarkInvocation: { + /** @description The type of the image. */ + image_origin: components["schemas"]["ResourceOrigin"]; + /** @description The category of the image. */ + image_category: components["schemas"]["ImageCategory"]; /** - * @description The board to save the image to - * @default null + * Width + * @description The width of the image in px. */ - board?: components["schemas"]["BoardField"] | null; + width: number; /** - * @description Optional metadata to be saved with the image - * @default null + * Height + * @description The height of the image in px. */ - metadata?: components["schemas"]["MetadataField"] | null; + height: number; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Created At + * @description The created timestamp of the image. */ - id: string; + created_at: string; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. 
- * @default false + * Updated At + * @description The updated timestamp of the image. */ - is_intermediate?: boolean; + updated_at: string; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Deleted At + * @description The deleted timestamp of the image. */ - use_cache?: boolean; + deleted_at?: string | null; /** - * @description The image to check - * @default null - */ - image?: components["schemas"]["ImageField"] | null; + * Is Intermediate + * @description Whether this is an intermediate image. + */ + is_intermediate: boolean; /** - * Text - * @description Watermark text - * @default InvokeAI + * Session Id + * @description The session ID that generated this image, if it is a generated image. */ - text?: string; + session_id?: string | null; /** - * type - * @default img_watermark - * @constant + * Node Id + * @description The node ID that generated this image, if it is a generated image. */ - type: "img_watermark"; - }; - /** ImagesDownloaded */ - ImagesDownloaded: { + node_id?: string | null; /** - * Response - * @description The message to display to the user when images begin downloading + * Starred + * @description Whether this image is starred. */ - response?: string | null; + starred: boolean; /** - * Bulk Download Item Name - * @description The name of the bulk download item for which events will be emitted + * Has Workflow + * @description Whether this image has a workflow. */ - bulk_download_item_name?: string | null; + has_workflow: boolean; + /** + * Board Id + * @description The id of the board the image belongs to, if one exists. + */ + board_id?: string | null; }; /** - * Solid Color Infill - * @description Infills transparent areas of an image with a solid color + * ImageField + * @description An image primitive field */ - InfillColorInvocation: { - /** - * @description The board to save the image to - * @default null - */ - board?: components["schemas"]["BoardField"] | null; + ImageField: { /** - * @description Optional metadata to be saved with the image - * @default null + * Image Name + * @description The name of the image */ - metadata?: components["schemas"]["MetadataField"] | null; + image_name: string; + }; + /** + * Image Generator + * @description Generates a collection of images for use in a batched generation + */ + ImageGenerator: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -11618,32 +11233,41 @@ */ use_cache?: boolean; /** - * @description The image to process - * @default null + * Generator Type + * @description The image generator.
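ImageDTO above is the enriched record the frontend consumes, while ImageField is the minimal reference that nodes pass around; the only shared key is the image name. A small conversion sketch (`toImageField` is a hypothetical helper, not part of the schema):

// Nodes reference images by name only; the DTO carries everything else
// (URLs, dimensions, timestamps, board membership).
function toImageField(dto: components["schemas"]["ImageDTO"]): components["schemas"]["ImageField"] {
  return { image_name: dto.image_name };
}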
*/ - image?: components["schemas"]["ImageField"] | null; + generator: components["schemas"]["ImageGeneratorField"]; /** - * @description The color to use to infill - * @default { - * "r": 127, - * "g": 127, - * "b": 127, - * "a": 255 - * } + * type + * @default image_generator + * @constant */ - color?: components["schemas"]["ColorField"]; + type: "image_generator"; + }; + /** ImageGeneratorField */ + ImageGeneratorField: Record<string, never>; + /** + * ImageGeneratorOutput + * @description Base class for nodes that output a collection of images + */ + ImageGeneratorOutput: { + /** + * Images + * @description The generated images + */ + images: components["schemas"]["ImageField"][]; /** * type - * @default infill_rgba + * @default image_generator_output * @constant */ - type: "infill_rgba"; + type: "image_generator_output"; }; /** - * PatchMatch Infill - * @description Infills transparent areas of an image using the PatchMatch algorithm + * Adjust Image Hue + * @description Adjusts the Hue of an image. */ - InfillPatchMatchInvocation: { + ImageHueAdjustmentInvocation: { /** * @description The board to save the image to * @default null @@ -11672,35 +11296,28 @@ */ use_cache?: boolean; /** - * @description The image to process + * @description The image to adjust * @default null */ image?: components["schemas"]["ImageField"] | null; /** - * Downscale - * @description Run patchmatch on downscaled image to speedup infill - * @default 2 - */ - downscale?: number; - /** - * Resample Mode - * @description The resampling mode - * @default bicubic - * @enum {string} + * Hue + * @description The degrees by which to rotate the hue, 0-360 + * @default 0 */ - resample_mode?: "nearest" | "box" | "bilinear" | "hamming" | "bicubic" | "lanczos"; + hue?: number; /** * type - * @default infill_patchmatch + * @default img_hue_adjust * @constant */ - type: "infill_patchmatch"; + type: "img_hue_adjust"; }; /** - * Tile Infill - * @description Infills transparent areas of an image with tiles of the image + * Inverse Lerp Image + * @description Inverse linear interpolation of all pixels of an image */ - InfillTileInvocation: { + ImageInverseLerpInvocation: { /** * @description The board to save the image to * @default null @@ -11729,114 +11346,78 @@ */ use_cache?: boolean; /** - * @description The image to process + * @description The image to lerp * @default null */ image?: components["schemas"]["ImageField"] | null; /** - * Tile Size - * @description The tile size (px) - * @default 32 + * Min + * @description The minimum input value + * @default 0 */ - tile_size?: number; + min?: number; /** - * Seed - * @description The seed to use for tile generation (omit for random) - * @default 0 + * Max + * @description The maximum input value + * @default 255 */ - seed?: number; + max?: number; /** * type - * @default infill_tile + * @default img_ilerp * @constant */ - type: "infill_tile"; + type: "img_ilerp"; }; /** - * Input - * @description The type of input a field accepts. - * - `Input.Direct`: The field must have its value provided directly, when the invocation and field are instantiated. - * - `Input.Connection`: The field must have its value provided by a connection. - * - `Input.Any`: The field may have its value provided either directly or by a connection. - * @enum {string} - */ - Input: "connection" | "direct" | "any"; - /** - * InputFieldJSONSchemaExtra - * @description Extra attributes to be added to input fields and their OpenAPI schema.
Used during graph execution, - * and by the workflow editor during schema parsing and UI rendering. + * Image Primitive + * @description An image primitive value */ - InputFieldJSONSchemaExtra: { - input: components["schemas"]["Input"]; - field_kind: components["schemas"]["FieldKind"]; - /** - * Orig Required - * @default true - */ - orig_required: boolean; - /** - * Default - * @default null - */ - default: unknown | null; + ImageInvocation: { /** - * Orig Default - * @default null + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - orig_default: unknown | null; + id: string; /** - * Ui Hidden + * Is Intermediate + * @description Whether or not this is an intermediate invocation. * @default false */ - ui_hidden: boolean; - /** @default null */ - ui_type: components["schemas"]["UIType"] | null; - /** @default null */ - ui_component: components["schemas"]["UIComponent"] | null; - /** - * Ui Order - * @default null - */ - ui_order: number | null; + is_intermediate?: boolean; /** - * Ui Choice Labels - * @default null + * Use Cache + * @description Whether or not to use the cache + * @default true */ - ui_choice_labels: { - [key: string]: string; - } | null; + use_cache?: boolean; /** - * Ui Model Base + * @description The image to load * @default null */ - ui_model_base: components["schemas"]["BaseModelType"][] | null; + image?: components["schemas"]["ImageField"] | null; /** - * Ui Model Type - * @default null + * type + * @default image + * @constant */ - ui_model_type: components["schemas"]["ModelType"][] | null; + type: "image"; + }; + /** + * Lerp Image + * @description Linear interpolation of all pixels of an image + */ + ImageLerpInvocation: { /** - * Ui Model Variant + * @description The board to save the image to * @default null */ - ui_model_variant: (components["schemas"]["ClipVariantType"] | components["schemas"]["ModelVariantType"])[] | null; + board?: components["schemas"]["BoardField"] | null; /** - * Ui Model Format + * @description Optional metadata to be saved with the image * @default null */ - ui_model_format: components["schemas"]["ModelFormat"][] | null; - }; - /** - * InstallStatus - * @description State of an install job running in the background. - * @enum {string} - */ - InstallStatus: "waiting" | "downloading" | "downloads_done" | "running" | "completed" | "error" | "cancelled"; - /** - * Integer Batch - * @description Create a batched generation, where the workflow is executed once for each integer in the batch. - */ - IntegerBatchInvocation: { + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -11855,30 +11436,39 @@ export type components = { */ use_cache?: boolean; /** - * Batch Group - * @description The ID of this batch node's group. If provided, all batch nodes in with the same ID will be 'zipped' before execution, and all nodes' collections must be of the same size. 
- * @default None - * @enum {string} - */ - batch_group_id?: "None" | "Group 1" | "Group 2" | "Group 3" | "Group 4" | "Group 5"; - /** - * Integers - * @description The integers to batch over + * @description The image to lerp * @default null */ - integers?: number[] | null; + image?: components["schemas"]["ImageField"] | null; + /** + * Min + * @description The minimum output value + * @default 0 + */ + min?: number; + /** + * Max + * @description The maximum output value + * @default 255 + */ + max?: number; /** * type - * @default integer_batch + * @default img_lerp * @constant */ - type: "integer_batch"; + type: "img_lerp"; }; /** - * Integer Collection Primitive - * @description A collection of integer primitive values + * Image Mask to Tensor + * @description Convert a mask image to a tensor. Converts the image to grayscale and uses thresholding at the specified value. */ - IntegerCollectionInvocation: { + ImageMaskToTensorInvocation: { + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -11897,40 +11487,44 @@ export type components = { */ use_cache?: boolean; /** - * Collection - * @description The collection of integer values - * @default [] + * @description The mask image to convert. + * @default null */ - collection?: number[]; + image?: components["schemas"]["ImageField"] | null; /** - * type - * @default integer_collection - * @constant + * Cutoff + * @description Cutoff (<) + * @default 128 */ - type: "integer_collection"; - }; - /** - * IntegerCollectionOutput - * @description Base class for nodes that output a collection of integers - */ - IntegerCollectionOutput: { + cutoff?: number; /** - * Collection - * @description The int collection + * Invert + * @description Whether to invert the mask. + * @default false */ - collection: number[]; + invert?: boolean; /** * type - * @default integer_collection_output + * @default image_mask_to_tensor * @constant */ - type: "integer_collection_output"; + type: "image_mask_to_tensor"; }; /** - * Integer Generator - * @description Generated a range of integers for use in a batched generation + * Multiply Images + * @description Multiplies two images together using `PIL.ImageChops.multiply()`. */ - IntegerGenerator: { + ImageMultiplyInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -11949,38 +11543,37 @@ export type components = { */ use_cache?: boolean; /** - * Generator Type - * @description The integer generator. 
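For the mask-to-tensor node defined above, the grayscale image is thresholded at `cutoff` and optionally inverted to produce the mask tensor. A payload sketch with a placeholder id and image name:

type MaskToTensorNode = components["schemas"]["ImageMaskToTensorInvocation"];

const maskNode: MaskToTensorNode = {
  id: "mask_to_tensor_1",
  type: "image_mask_to_tensor",
  image: { image_name: "mask.png" },
  cutoff: 128, // threshold; see the Cutoff (<) field above
  invert: false,
};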
- */ - generator: components["schemas"]["IntegerGeneratorField"]; - /** - * type - * @default integer_generator - * @constant + * @description The first image to multiply + * @default null */ - type: "integer_generator"; - }; - /** IntegerGeneratorField */ - IntegerGeneratorField: Record<string, never>; - /** IntegerGeneratorOutput */ - IntegerGeneratorOutput: { + image1?: components["schemas"]["ImageField"] | null; /** - * Integers - * @description The generated integers + * @description The second image to multiply + * @default null */ - integers: number[]; + image2?: components["schemas"]["ImageField"] | null; /** * type - * @default integer_generator_output + * @default img_mul * @constant */ - type: "integer_generator_output"; + type: "img_mul"; }; /** - * Integer Primitive - * @description An integer primitive value + * Blur NSFW Image + * @description Add blur to NSFW-flagged images */ - IntegerInvocation: { + ImageNSFWBlurInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -11999,88 +11592,53 @@ */ use_cache?: boolean; /** - * Value - * @description The integer value - * @default 0 + * @description The image to check + * @default null */ - value?: number; + image?: components["schemas"]["ImageField"] | null; /** * type - * @default integer + * @default img_nsfw * @constant */ - type: "integer"; + type: "img_nsfw"; }; /** - * Integer Math - * @description Performs integer math. + * ImageNamesResult + * @description Response containing ordered image names with metadata for optimistic updates. */ - IntegerMathInvocation: { - /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. - */ - id: string; - /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation.
- * @default false - */ - is_intermediate?: boolean; - /** - * Use Cache - * @description Whether or not to use the cache - * @default true - */ - use_cache?: boolean; - /** - * Operation - * @description The operation to perform - * @default ADD - * @enum {string} - */ - operation?: "ADD" | "SUB" | "MUL" | "DIV" | "EXP" | "MOD" | "ABS" | "MIN" | "MAX"; + ImageNamesResult: { /** - * A - * @description The first number - * @default 1 + * Image Names + * @description Ordered list of image names */ - a?: number; + image_names: string[]; /** - * B - * @description The second number - * @default 1 + * Starred Count + * @description Number of starred images (when starred_first=True) */ - b?: number; + starred_count: number; /** - * type - * @default integer_math - * @constant + * Total Count + * @description Total number of images matching the query */ - type: "integer_math"; + total_count: number; }; /** - * IntegerOutput - * @description Base class for nodes that output a single integer + * Add Image Noise + * @description Add noise to an image */ - IntegerOutput: { + ImageNoiseInvocation: { /** - * Value - * @description The output integer + * @description The board to save the image to + * @default null */ - value: number; + board?: components["schemas"]["BoardField"] | null; /** - * type - * @default integer_output - * @constant + * @description Optional metadata to be saved with the image + * @default null */ - type: "integer_output"; - }; - /** - * Invert Tensor Mask - * @description Inverts a tensor mask. - */ - InvertTensorMaskInvocation: { + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -12099,188 +11657,1242 @@ export type components = { */ use_cache?: boolean; /** - * @description The tensor mask to convert. 
+ * @description The image to add noise to * @default null */ - mask?: components["schemas"]["TensorField"] | null; + image?: components["schemas"]["ImageField"] | null; /** - * type - * @default invert_tensor_mask - * @constant + * @description Optional mask determining where to apply noise (black=noise, white=no noise) + * @default null */ - type: "invert_tensor_mask"; - }; - /** InvocationCacheStatus */ - InvocationCacheStatus: { + mask?: components["schemas"]["ImageField"] | null; /** - * Size - * @description The current size of the invocation cache + * Seed + * @description Seed for random number generation + * @default 0 */ - size: number; + seed?: number; /** - * Hits - * @description The number of cache hits + * Noise Type + * @description The type of noise to add + * @default gaussian + * @enum {string} */ - hits: number; + noise_type?: "gaussian" | "salt_and_pepper"; /** - * Misses - * @description The number of cache misses + * Amount + * @description The amount of noise to add + * @default 0.1 */ - misses: number; + amount?: number; /** - * Enabled - * @description Whether the invocation cache is enabled + * Noise Color + * @description Whether to add colored noise + * @default true */ - enabled: boolean; + noise_color?: boolean; /** - * Max Size - * @description The maximum size of the invocation cache + * Size + * @description The size of the noise points + * @default 1 */ - max_size: number; + size?: number; + /** + * type + * @default img_noise + * @constant + */ + type: "img_noise"; }; /** - * InvocationCompleteEvent - * @description Event model for invocation_complete + * ImageOutput + * @description Base class for nodes that output a single image */ - InvocationCompleteEvent: { + ImageOutput: { + /** @description The output image */ + image: components["schemas"]["ImageField"]; /** - * Timestamp - * @description The timestamp of the event + * Width + * @description The width of the image in pixels */ - timestamp: number; + width: number; /** - * Queue Id - * @description The ID of the queue + * Height + * @description The height of the image in pixels */ - queue_id: string; + height: number; /** - * Item Id - * @description The ID of the queue item + * type + * @default image_output + * @constant */ - item_id: number; + type: "image_output"; + }; + /** ImagePanelCoordinateOutput */ + ImagePanelCoordinateOutput: { /** - * Batch Id - * @description The ID of the queue batch + * X Left + * @description The left x-coordinate of the panel. */ - batch_id: string; + x_left: number; /** - * Origin - * @description The origin of the queue item + * Y Top + * @description The top y-coordinate of the panel. + */ + y_top: number; + /** + * Width + * @description The width of the panel. + */ + width: number; + /** + * Height + * @description The height of the panel. + */ + height: number; + /** + * type + * @default image_panel_coordinate_output + * @constant + */ + type: "image_panel_coordinate_output"; + }; + /** + * Image Panel Layout + * @description Get the coordinates of a single panel in a grid. (If the full image shape cannot be divided evenly into panels, + * then the grid may not cover the entire image.) + */ + ImagePanelLayoutInvocation: { + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. 
+ * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * Width + * @description The width of the entire grid. * @default null */ - origin: string | null; + width?: number | null; /** - * Destination - * @description The destination of the queue item + * Height + * @description The height of the entire grid. * @default null */ - destination: string | null; + height?: number | null; /** - * Session Id - * @description The ID of the session (aka graph execution state) + * Num Cols + * @description The number of columns in the grid. + * @default 1 */ - session_id: string; + num_cols?: number; /** - * Invocation - * @description The ID of the invocation + * Num Rows + * @description The number of rows in the grid. + * @default 1 */ - invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | 
components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | 
components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | 
components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"]; + num_rows?: number; /** - * Invocation Source Id - * @description The ID of the prepared invocation's source node + * Panel Col Idx + * @description The column index of the panel to be processed. + * @default 0 */ - invocation_source_id: string; + panel_col_idx?: number; /** - * Result - * @description The result of the invocation + * Panel Row Idx + * @description The row index of the panel to be processed. + * @default 0 */ - result: components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["BoundingBoxCollectionOutput"] | components["schemas"]["BoundingBoxOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["CogView4ConditioningOutput"] | components["schemas"]["CogView4ModelLoaderOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FloatGeneratorOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["FluxConditioningCollectionOutput"] | components["schemas"]["FluxConditioningOutput"] | components["schemas"]["FluxControlLoRALoaderOutput"] | components["schemas"]["FluxControlNetOutput"] | components["schemas"]["FluxFillOutput"] | components["schemas"]["FluxKontextOutput"] | components["schemas"]["FluxLoRALoaderOutput"] | components["schemas"]["FluxModelLoaderOutput"] | components["schemas"]["FluxReduxOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ImageGeneratorOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImagePanelCoordinateOutput"] | 
components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IntegerGeneratorOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LatentsMetaOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["LoRASelectorOutput"] | components["schemas"]["MDControlListOutput"] | components["schemas"]["MDIPAdapterListOutput"] | components["schemas"]["MDT2IAdapterListOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["MetadataToLorasCollectionOutput"] | components["schemas"]["MetadataToModelOutput"] | components["schemas"]["MetadataToSDXLModelOutput"] | components["schemas"]["ModelIdentifierOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["SD3ConditioningOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["Sd3ModelLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["String2Output"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["StringGeneratorOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["VideoOutput"]; + panel_row_idx?: number; + /** + * type + * @default image_panel_layout + * @constant + */ + type: "image_panel_layout"; }; /** - * InvocationErrorEvent - * @description Event model for invocation_error + * Paste Image + * @description Pastes an image into another image. */ - InvocationErrorEvent: { + ImagePasteInvocation: { /** - * Timestamp - * @description The timestamp of the event + * @description The board to save the image to + * @default null */ - timestamp: number; + board?: components["schemas"]["BoardField"] | null; /** - * Queue Id - * @description The ID of the queue + * @description Optional metadata to be saved with the image + * @default null */ - queue_id: string; + metadata?: components["schemas"]["MetadataField"] | null; /** - * Item Id - * @description The ID of the queue item + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - item_id: number; + id: string; /** - * Batch Id - * @description The ID of the queue batch + * Is Intermediate + * @description Whether or not this is an intermediate invocation. 
 /**
- * InvocationErrorEvent
- * @description Event model for invocation_error
+ * Paste Image
+ * @description Pastes an image into another image.
 */
- InvocationErrorEvent: {
+ ImagePasteInvocation: {
 /**
- * Timestamp
- * @description The timestamp of the event
+ * @description The board to save the image to
+ * @default null
 */
- timestamp: number;
+ board?: components["schemas"]["BoardField"] | null;
 /**
- * Queue Id
- * @description The ID of the queue
+ * @description Optional metadata to be saved with the image
+ * @default null
 */
- queue_id: string;
+ metadata?: components["schemas"]["MetadataField"] | null;
 /**
- * Item Id
- * @description The ID of the queue item
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
 */
- item_id: number;
+ id: string;
 /**
- * Batch Id
- * @description The ID of the queue batch
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
 */
- batch_id: string;
+ is_intermediate?: boolean;
 /**
- * Origin
- * @description The origin of the queue item
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
+ */
+ use_cache?: boolean;
+ /**
+ * @description The base image
 * @default null
 */
- origin: string | null;
+ base_image?: components["schemas"]["ImageField"] | null;
 /**
- * Destination
- * @description The destination of the queue item
+ * @description The image to paste
 * @default null
 */
- destination: string | null;
+ image?: components["schemas"]["ImageField"] | null;
 /**
- * Session Id
- * @description The ID of the session (aka graph execution state)
+ * @description The mask to use when pasting
+ * @default null
 */
- session_id: string;
+ mask?: components["schemas"]["ImageField"] | null;
 /**
- * Invocation
- * @description The ID of the invocation
+ * X
+ * @description The left x coordinate at which to paste the image
+ * @default 0
 */
- invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] |
components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] 
| components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] 
| components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"]; + x?: number; /** - * Invocation Source Id - * @description The ID of the prepared invocation's source node + * Y + * @description The top y coordinate at which to paste the image + * @default 0 */ - invocation_source_id: string; + y?: number; /** - * Error Type - * @description The error type + * Crop + * @description Crop to base image dimensions + * @default false */ - error_type: string; + crop?: boolean; /** - * Error Message - * @description The error message + * type + * @default img_paste + * @constant */ - error_message: string; + type: "img_paste"; + }; + /** + * ImageRecordChanges + * @description A set of changes to apply to an image record. + * + * Only limited changes are valid: + * - `image_category`: change the category of an image + * - `session_id`: change the session associated with an image + * - `is_intermediate`: change the image's `is_intermediate` flag + * - `starred`: change whether the image is starred + */ + ImageRecordChanges: { + /** @description The image's new category. */ + image_category?: components["schemas"]["ImageCategory"] | null; /** - * Error Traceback - * @description The error traceback + * Session Id + * @description The image's new session ID. */ - error_traceback: string; + session_id?: string | null; /** - * User Id - * @description The ID of the user who created the invocation - * @default null + * Is Intermediate + * @description The image's new `is_intermediate` flag. 
+ /**
+ * ImageRecordChanges
+ * @description A set of changes to apply to an image record.
+ *
+ * Only limited changes are valid:
+ * - `image_category`: change the category of an image
+ * - `session_id`: change the session associated with an image
+ * - `is_intermediate`: change the image's `is_intermediate` flag
+ * - `starred`: change whether the image is starred
+ */
+ ImageRecordChanges: {
+ /** @description The image's new category. */
+ image_category?: components["schemas"]["ImageCategory"] | null;
 /**
- * Error Traceback
- * @description The error traceback
+ * Session Id
+ * @description The image's new session ID.
 */
- error_traceback: string;
+ session_id?: string | null;
 /**
- * User Id
- * @description The ID of the user who created the invocation
- * @default null
+ * Is Intermediate
+ * @description The image's new `is_intermediate` flag.
 */
- user_id: string | null;
+ is_intermediate?: boolean | null;
 /**
- * Project Id
- * @description The ID of the user who created the invocation
- * @default null
+ * Starred
+ * @description The image's new `starred` state
 */
- project_id: string | null;
+ starred?: boolean | null;
+ } & {
+ [key: string]: unknown;
 };
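// --- Illustrative sketch, not part of the diff: `ImageRecordChanges` is a
// partial-update payload; omitted fields are left untouched. The PATCH URL
// below is an assumption for illustration, not taken from this hunk.
import type { components } from "services/api/schema";
async function starImage(imageName: string): Promise<void> {
  const changes: components["schemas"]["ImageRecordChanges"] = {
    starred: true,          // star the image
    is_intermediate: false, // promote it out of the intermediates
  };
  await fetch(`/api/v1/images/i/${encodeURIComponent(imageName)}`, {
    method: "PATCH",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(changes),
  });
}
// --- end sketch ---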
- InvocationOutputMap: {
- add: components["schemas"]["IntegerOutput"];
- alpha_mask_to_tensor: components["schemas"]["MaskOutput"];
- apply_mask_to_image: components["schemas"]["ImageOutput"];
- apply_tensor_mask_to_image: components["schemas"]["ImageOutput"];
+ /**
+ * Resize Image
+ * @description Resizes an image to specific dimensions
+ */
+ ImageResizeInvocation: {
+ /**
+ * @description The board to save the image to
+ * @default null
+ */
+ board?: components["schemas"]["BoardField"] | null;
+ /**
+ * @description Optional metadata to be saved with the image
+ * @default null
+ */
+ metadata?: components["schemas"]["MetadataField"] | null;
+ /**
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ */
+ id: string;
+ /**
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
+ */
+ is_intermediate?: boolean;
+ /**
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
+ */
+ use_cache?: boolean;
+ /**
+ * @description The image to resize
+ * @default null
+ */
+ image?: components["schemas"]["ImageField"] | null;
+ /**
+ * Width
+ * @description The width to resize to (px)
+ * @default 512
+ */
+ width?: number;
+ /**
+ * Height
+ * @description The height to resize to (px)
+ * @default 512
+ */
+ height?: number;
+ /**
+ * Resample Mode
+ * @description The resampling mode
+ * @default bicubic
+ * @enum {string}
+ */
+ resample_mode?: "nearest" | "box" | "bilinear" | "hamming" | "bicubic" | "lanczos";
+ /**
+ * type
+ * @default img_resize
+ * @constant
+ */
+ type: "img_resize";
+ };
+ /**
+ * Scale Image
+ * @description Scales an image by a factor
+ */
+ ImageScaleInvocation: {
+ /**
+ * @description The board to save the image to
+ * @default null
+ */
+ board?: components["schemas"]["BoardField"] | null;
+ /**
+ * @description Optional metadata to be saved with the image
+ * @default null
+ */
+ metadata?: components["schemas"]["MetadataField"] | null;
+ /**
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ */
+ id: string;
+ /**
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
+ */
+ is_intermediate?: boolean;
+ /**
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
+ */
+ use_cache?: boolean;
+ /**
+ * @description The image to scale
+ * @default null
+ */
+ image?: components["schemas"]["ImageField"] | null;
+ /**
+ * Scale Factor
+ * @description The factor by which to scale the image
+ * @default 2
+ */
+ scale_factor?: number;
+ /**
+ * Resample Mode
+ * @description The resampling mode
+ * @default bicubic
+ * @enum {string}
+ */
+ resample_mode?: "nearest" | "box" | "bilinear" | "hamming" | "bicubic" | "lanczos";
+ /**
+ * type
+ * @default img_scale
+ * @constant
+ */
+ type: "img_scale";
+ };
+ /**
+ * Image to Latents - SD1.5, SDXL
+ * @description Encodes an image into latents.
+ */
+ ImageToLatentsInvocation: {
+ /**
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ */
+ id: string;
+ /**
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
+ */
+ is_intermediate?: boolean;
+ /**
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
+ */
+ use_cache?: boolean;
+ /**
+ * @description The image to encode
+ * @default null
+ */
+ image?: components["schemas"]["ImageField"] | null;
+ /**
+ * @description VAE
+ * @default null
+ */
+ vae?: components["schemas"]["VAEField"] | null;
+ /**
+ * Tiled
+ * @description Processing using overlapping tiles (reduces memory consumption)
+ * @default false
+ */
+ tiled?: boolean;
+ /**
+ * Tile Size
+ * @description The tile size for VAE tiling in pixels (image space). If set to 0, the default tile size for the model will be used. Larger tile sizes generally produce better results at the cost of higher memory usage.
+ * @default 0
+ */
+ tile_size?: number;
+ /**
+ * Fp32
+ * @description Whether or not to use full float32 precision
+ * @default false
+ */
+ fp32?: boolean;
+ /**
+ * type
+ * @default i2l
+ * @constant
+ */
+ type: "i2l";
+ };
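// --- Illustrative sketch, not part of the diff: an `i2l` node. Enabling
// `tiled` with a `tile_size` of 0 lets the model pick its default tile size,
// trading a small seam risk for lower peak memory during the VAE encode.
// The import path is an assumption.
import type { components } from "services/api/schema";
const i2lNode: components["schemas"]["ImageToLatentsInvocation"] = {
  id: "i2l_1",
  type: "i2l",
  image: { image_name: "init.png" }, // placeholder image name
  vae: null,    // in practice wired from a model loader via a connection
  tiled: true,  // encode in overlapping tiles to reduce memory use
  tile_size: 0, // 0 = use the model's default tile size
  fp32: false,  // half precision is usually sufficient for encoding
};
// --- end sketch ---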
+ /** ImageUploadEntry */
+ ImageUploadEntry: {
+ /** @description The image DTO */
+ image_dto: components["schemas"]["ImageDTO"];
+ /**
+ * Presigned Url
+ * @description The URL to get the presigned URL for the image upload
+ */
+ presigned_url: string;
+ };
+ /**
+ * ImageUrlsDTO
+ * @description The URLs for an image and its thumbnail.
+ */
+ ImageUrlsDTO: {
+ /**
+ * Image Name
+ * @description The unique name of the image.
+ */
+ image_name: string;
+ /**
+ * Image Url
+ * @description The URL of the image.
+ */
+ image_url: string;
+ /**
+ * Thumbnail Url
+ * @description The URL of the image's thumbnail.
+ */
+ thumbnail_url: string;
+ };
+ /**
+ * Add Invisible Watermark
+ * @description Add an invisible watermark to an image
+ */
+ ImageWatermarkInvocation: {
+ /**
+ * @description The board to save the image to
+ * @default null
+ */
+ board?: components["schemas"]["BoardField"] | null;
+ /**
+ * @description Optional metadata to be saved with the image
+ * @default null
+ */
+ metadata?: components["schemas"]["MetadataField"] | null;
+ /**
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ */
+ id: string;
+ /**
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
+ */
+ is_intermediate?: boolean;
+ /**
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
+ */
+ use_cache?: boolean;
+ /**
+ * @description The image to check
+ * @default null
+ */
+ image?: components["schemas"]["ImageField"] | null;
+ /**
+ * Text
+ * @description Watermark text
+ * @default InvokeAI
+ */
+ text?: string;
+ /**
+ * type
+ * @default img_watermark
+ * @constant
+ */
+ type: "img_watermark";
+ };
+ /** ImagesDownloaded */
+ ImagesDownloaded: {
+ /**
+ * Response
+ * @description The message to display to the user when images begin downloading
+ */
+ response?: string | null;
+ /**
+ * Bulk Download Item Name
+ * @description The name of the bulk download item for which events will be emitted
+ */
+ bulk_download_item_name?: string | null;
+ };
+ /**
+ * Solid Color Infill
+ * @description Infills transparent areas of an image with a solid color
+ */
+ InfillColorInvocation: {
+ /**
+ * @description The board to save the image to
+ * @default null
+ */
+ board?: components["schemas"]["BoardField"] | null;
+ /**
+ * @description Optional metadata to be saved with the image
+ * @default null
+ */
+ metadata?: components["schemas"]["MetadataField"] | null;
+ /**
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ */
+ id: string;
+ /**
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
+ */
+ is_intermediate?: boolean;
+ /**
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
+ */
+ use_cache?: boolean;
+ /**
+ * @description The image to process
+ * @default null
+ */
+ image?: components["schemas"]["ImageField"] | null;
+ /**
+ * @description The color to use to infill
+ * @default {
+ * "r": 127,
+ * "g": 127,
+ * "b": 127,
+ * "a": 255
+ * }
+ */
+ color?: components["schemas"]["ColorField"];
+ /**
+ * type
+ * @default infill_rgba
+ * @constant
+ */
+ type: "infill_rgba";
+ };
+ /**
+ * PatchMatch Infill
+ * @description Infills transparent areas of an image using the PatchMatch algorithm
+ */
+ InfillPatchMatchInvocation: {
+ /**
+ * @description The board to save the image to
+ * @default null
+ */
+ board?: components["schemas"]["BoardField"] | null;
+ /**
+ * @description Optional metadata to be saved with the image
+ * @default null
+ */
+ metadata?: components["schemas"]["MetadataField"] | null;
+ /**
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ */
+ id: string;
+ /**
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
+ */
+ is_intermediate?: boolean;
+ /**
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
+ */
+ use_cache?: boolean;
+ /**
+ * @description The image to process
+ * @default null
+ */
+ image?: components["schemas"]["ImageField"] | null;
+ /**
+ * Downscale
+ * @description Run patchmatch on downscaled image to speed up infill
+ * @default 2
+ */
+ downscale?: number;
+ /**
+ * Resample Mode
+ * @description The resampling mode
+ * @default bicubic
+ * @enum {string}
+ */
+ resample_mode?: "nearest" | "box" | "bilinear" | "hamming" | "bicubic" | "lanczos";
+ /**
+ * type
+ * @default infill_patchmatch
+ * @constant
+ */
+ type: "infill_patchmatch";
+ };
+ /**
+ * Tile Infill
+ * @description Infills transparent areas of an image with tiles of the image
+ */
+ InfillTileInvocation: {
+ /**
+ * @description The board to save the image to
+ * @default null
+ */
+ board?: components["schemas"]["BoardField"] | null;
+ /**
+ * @description Optional metadata to be saved with the image
+ * @default null
+ */
+ metadata?: components["schemas"]["MetadataField"] | null;
+ /**
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ */
+ id: string;
+ /**
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
+ */
+ is_intermediate?: boolean;
+ /**
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
+ */
+ use_cache?: boolean;
+ /**
+ * @description The image to process
+ * @default null
+ */
+ image?: components["schemas"]["ImageField"] | null;
+ /**
+ * Tile Size
+ * @description The tile size (px)
+ * @default 32
+ */
+ tile_size?: number;
+ /**
+ * Seed
+ * @description The seed to use for tile generation (omit for random)
+ * @default 0
+ */
+ seed?: number;
+ /**
+ * type
+ * @default infill_tile
+ * @constant
+ */
+ type: "infill_tile";
+ };
+ /**
+ * Input
+ * @description The type of input a field accepts.
+ * - `Input.Direct`: The field must have its value provided directly, when the invocation and field are instantiated.
+ * - `Input.Connection`: The field must have its value provided by a connection.
+ * - `Input.Any`: The field may have its value provided either directly or by a connection.
+ * @enum {string}
+ */
+ Input: "connection" | "direct" | "any";
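// --- Illustrative sketch, not part of the diff: how a workflow editor might
// branch on `Input` when deciding whether a field can accept an edge. The
// import path is an assumption.
import type { components } from "services/api/schema";
const canConnect = (input: components["schemas"]["Input"]): boolean =>
  input === "connection" || input === "any"; // "direct" fields are value-only
// --- end sketch ---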
+ /**
+ * InputFieldJSONSchemaExtra
+ * @description Extra attributes to be added to input fields and their OpenAPI schema. Used during graph execution,
+ * and by the workflow editor during schema parsing and UI rendering.
+ */
+ InputFieldJSONSchemaExtra: {
+ input: components["schemas"]["Input"];
+ field_kind: components["schemas"]["FieldKind"];
+ /**
+ * Orig Required
+ * @default true
+ */
+ orig_required: boolean;
+ /**
+ * Default
+ * @default null
+ */
+ default: unknown | null;
+ /**
+ * Orig Default
+ * @default null
+ */
+ orig_default: unknown | null;
+ /**
+ * Ui Hidden
+ * @default false
+ */
+ ui_hidden: boolean;
+ /** @default null */
+ ui_type: components["schemas"]["UIType"] | null;
+ /** @default null */
+ ui_component: components["schemas"]["UIComponent"] | null;
+ /**
+ * Ui Order
+ * @default null
+ */
+ ui_order: number | null;
+ /**
+ * Ui Choice Labels
+ * @default null
+ */
+ ui_choice_labels: {
+ [key: string]: string;
+ } | null;
+ /**
+ * Ui Model Base
+ * @default null
+ */
+ ui_model_base: components["schemas"]["BaseModelType"][] | null;
+ /**
+ * Ui Model Type
+ * @default null
+ */
+ ui_model_type: components["schemas"]["ModelType"][] | null;
+ /**
+ * Ui Model Variant
+ * @default null
+ */
+ ui_model_variant: (components["schemas"]["ClipVariantType"] | components["schemas"]["ModelVariantType"])[] | null;
+ /**
+ * Ui Model Format
+ * @default null
+ */
+ ui_model_format: components["schemas"]["ModelFormat"][] | null;
+ };
+ /**
+ * InstallStatus
+ * @description State of an install job running in the background.
+ * @enum {string}
+ */
+ InstallStatus: "waiting" | "downloading" | "downloads_done" | "running" | "completed" | "error" | "cancelled";
+ /**
+ * Integer Batch
+ * @description Create a batched generation, where the workflow is executed once for each integer in the batch.
+ */
+ IntegerBatchInvocation: {
+ /**
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ */
+ id: string;
+ /**
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
+ */
+ is_intermediate?: boolean;
+ /**
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
+ */
+ use_cache?: boolean;
+ /**
+ * Batch Group
+ * @description The ID of this batch node's group. If provided, all batch nodes with the same ID will be 'zipped' before execution, and all nodes' collections must be of the same size.
+ * @default None
+ * @enum {string}
+ */
+ batch_group_id?: "None" | "Group 1" | "Group 2" | "Group 3" | "Group 4" | "Group 5";
+ /**
+ * Integers
+ * @description The integers to batch over
+ * @default null
+ */
+ integers?: number[] | null;
+ /**
+ * type
+ * @default integer_batch
+ * @constant
+ */
+ type: "integer_batch";
+ };
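// --- Illustrative sketch, not part of the diff: two batch nodes placed in
// the same batch group are zipped pairwise before execution, so their
// collections must be the same length; each run then gets one pair of values.
// The import path is an assumption.
import type { components } from "services/api/schema";
const seeds: components["schemas"]["IntegerBatchInvocation"] = {
  id: "seed_batch",
  type: "integer_batch",
  batch_group_id: "Group 1",
  integers: [1, 2, 3],
};
const steps: components["schemas"]["IntegerBatchInvocation"] = {
  id: "step_batch",
  type: "integer_batch",
  batch_group_id: "Group 1", // same group => zipped with `seeds`
  integers: [20, 30, 40],    // must match the zipped collection's length
};
// --- end sketch ---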
+ /**
+ * Integer Collection Primitive
+ * @description A collection of integer primitive values
+ */
+ IntegerCollectionInvocation: {
+ /**
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ */
+ id: string;
+ /**
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
+ */
+ is_intermediate?: boolean;
+ /**
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
+ */
+ use_cache?: boolean;
+ /**
+ * Collection
+ * @description The collection of integer values
+ * @default []
+ */
+ collection?: number[];
+ /**
+ * type
+ * @default integer_collection
+ * @constant
+ */
+ type: "integer_collection";
+ };
+ /**
+ * IntegerCollectionOutput
+ * @description Base class for nodes that output a collection of integers
+ */
+ IntegerCollectionOutput: {
+ /**
+ * Collection
+ * @description The int collection
+ */
+ collection: number[];
+ /**
+ * type
+ * @default integer_collection_output
+ * @constant
+ */
+ type: "integer_collection_output";
+ };
+ /**
+ * Integer Generator
+ * @description Generates a range of integers for use in a batched generation
+ */
+ IntegerGenerator: {
+ /**
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ */
+ id: string;
+ /**
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
+ */
+ is_intermediate?: boolean;
+ /**
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
+ */
+ use_cache?: boolean;
+ /**
+ * Generator Type
+ * @description The integer generator.
+ */
+ generator: components["schemas"]["IntegerGeneratorField"];
+ /**
+ * type
+ * @default integer_generator
+ * @constant
+ */
+ type: "integer_generator";
+ };
+ /** IntegerGeneratorField */
+ IntegerGeneratorField: Record<string, never>;
+ /** IntegerGeneratorOutput */
+ IntegerGeneratorOutput: {
+ /**
+ * Integers
+ * @description The generated integers
+ */
+ integers: number[];
+ /**
+ * type
+ * @default integer_generator_output
+ * @constant
+ */
+ type: "integer_generator_output";
+ };
+ /**
+ * Integer Primitive
+ * @description An integer primitive value
+ */
+ IntegerInvocation: {
+ /**
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ */
+ id: string;
+ /**
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
+ */
+ is_intermediate?: boolean;
+ /**
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
+ */
+ use_cache?: boolean;
+ /**
+ * Value
+ * @description The integer value
+ * @default 0
+ */
+ value?: number;
+ /**
+ * type
+ * @default integer
+ * @constant
+ */
+ type: "integer";
+ };
+ /**
+ * Integer Math
+ * @description Performs integer math.
+ */
+ IntegerMathInvocation: {
+ /**
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ */
+ id: string;
+ /**
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
+ */
+ is_intermediate?: boolean;
+ /**
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
+ */
+ use_cache?: boolean;
+ /**
+ * Operation
+ * @description The operation to perform
+ * @default ADD
+ * @enum {string}
+ */
+ operation?: "ADD" | "SUB" | "MUL" | "DIV" | "EXP" | "MOD" | "ABS" | "MIN" | "MAX";
+ /**
+ * A
+ * @description The first number
+ * @default 1
+ */
+ a?: number;
+ /**
+ * B
+ * @description The second number
+ * @default 1
+ */
+ b?: number;
+ /**
+ * type
+ * @default integer_math
+ * @constant
+ */
+ type: "integer_math";
+ };
+ /**
+ * IntegerOutput
+ * @description Base class for nodes that output a single integer
+ */
+ IntegerOutput: {
+ /**
+ * Value
+ * @description The output integer
+ */
+ value: number;
+ /**
+ * type
+ * @default integer_output
+ * @constant
+ */
+ type: "integer_output";
+ };
+ /**
+ * Invert Tensor Mask
+ * @description Inverts a tensor mask.
+ */
+ InvertTensorMaskInvocation: {
+ /**
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ */
+ id: string;
+ /**
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
+ */
+ is_intermediate?: boolean;
+ /**
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
+ */
+ use_cache?: boolean;
+ /**
+ * @description The tensor mask to convert.
+ * @default null
+ */
+ mask?: components["schemas"]["TensorField"] | null;
+ /**
+ * type
+ * @default invert_tensor_mask
+ * @constant
+ */
+ type: "invert_tensor_mask";
+ };
+ /** InvocationCacheStatus */
+ InvocationCacheStatus: {
+ /**
+ * Size
+ * @description The current size of the invocation cache
+ */
+ size: number;
+ /**
+ * Hits
+ * @description The number of cache hits
+ */
+ hits: number;
+ /**
+ * Misses
+ * @description The number of cache misses
+ */
+ misses: number;
+ /**
+ * Enabled
+ * @description Whether the invocation cache is enabled
+ */
+ enabled: boolean;
+ /**
+ * Max Size
+ * @description The maximum size of the invocation cache
+ */
+ max_size: number;
+ };
+ /**
+ * InvocationCompleteEvent
+ * @description Event model for invocation_complete
+ */
+ InvocationCompleteEvent: {
+ /**
+ * Timestamp
+ * @description The timestamp of the event
+ */
+ timestamp: number;
+ /**
+ * Queue Id
+ * @description The ID of the queue
+ */
+ queue_id: string;
+ /**
+ * Item Id
+ * @description The ID of the queue item
+ */
+ item_id: number;
+ /**
+ * Batch Id
+ * @description The ID of the queue batch
+ */
+ batch_id: string;
+ /**
+ * Origin
+ * @description The origin of the queue item
+ * @default null
+ */
+ origin: string | null;
+ /**
+ * Destination
+ * @description The destination of the queue item
+ * @default null
+ */
+ destination: string | null;
+ /**
+ * Session Id
+ * @description The ID of the session (aka graph execution state)
+ */
+ session_id: string;
+ /**
+ * Invocation
+ * @description The ID of the invocation
+ */
+ invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] |
components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | 
components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | 
components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"]; + /** + * Invocation Source Id + * @description The ID of the prepared 
invocation's source node + */ + invocation_source_id: string; + /** + * Result + * @description The result of the invocation + */ + result: components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["BoundingBoxCollectionOutput"] | components["schemas"]["BoundingBoxOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["CogView4ConditioningOutput"] | components["schemas"]["CogView4ModelLoaderOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FloatGeneratorOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["FluxConditioningCollectionOutput"] | components["schemas"]["FluxConditioningOutput"] | components["schemas"]["FluxControlLoRALoaderOutput"] | components["schemas"]["FluxControlNetOutput"] | components["schemas"]["FluxFillOutput"] | components["schemas"]["FluxKontextOutput"] | components["schemas"]["FluxLoRALoaderOutput"] | components["schemas"]["FluxModelLoaderOutput"] | components["schemas"]["FluxReduxOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ImageGeneratorOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ImagePanelCoordinateOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IntegerGeneratorOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LatentsMetaOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["LoRASelectorOutput"] | components["schemas"]["MDControlListOutput"] | components["schemas"]["MDIPAdapterListOutput"] | components["schemas"]["MDT2IAdapterListOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["MetadataToLorasCollectionOutput"] | components["schemas"]["MetadataToModelOutput"] | components["schemas"]["MetadataToSDXLModelOutput"] | components["schemas"]["ModelIdentifierOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["SD3ConditioningOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["Sd3ModelLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["String2Output"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["StringGeneratorOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["T2IAdapterOutput"] | 
components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["VideoOutput"]; + }; + /** + * InvocationErrorEvent + * @description Event model for invocation_error + */ + InvocationErrorEvent: { + /** + * Timestamp + * @description The timestamp of the event + */ + timestamp: number; + /** + * Queue Id + * @description The ID of the queue + */ + queue_id: string; + /** + * Item Id + * @description The ID of the queue item + */ + item_id: number; + /** + * Batch Id + * @description The ID of the queue batch + */ + batch_id: string; + /** + * Origin + * @description The origin of the queue item + * @default null + */ + origin: string | null; + /** + * Destination + * @description The destination of the queue item + * @default null + */ + destination: string | null; + /** + * Session Id + * @description The ID of the session (aka graph execution state) + */ + session_id: string; + /** + * Invocation + * @description The ID of the invocation + */ + invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | 
components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | 
components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | 
components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"]; + /** + * Invocation Source Id + * @description The ID of the prepared invocation's source node + */ + invocation_source_id: string; + /** + * Error Type + * @description The error type + */ + error_type: string; + /** + * Error Message + * @description The error message + */ + error_message: string; + /** + * Error Traceback + * @description The error traceback + */ + error_traceback: string; + /** + * User Id + * @description The ID of the user who created the invocation + * @default null + */ + user_id: string | null; + /** + * Project Id + * @description The ID of the user who created the invocation + * @default null + */ + project_id: string | null; + }; + InvocationOutputMap: { + add: components["schemas"]["IntegerOutput"]; + alpha_mask_to_tensor: components["schemas"]["MaskOutput"]; + apply_mask_to_image: components["schemas"]["ImageOutput"]; + apply_tensor_mask_to_image: components["schemas"]["ImageOutput"]; blank_image: components["schemas"]["ImageOutput"]; boolean: components["schemas"]["BooleanOutput"]; boolean_collection: components["schemas"]["BooleanCollectionOutput"]; @@ -12490,1527 +13102,4058 @@ export type components = { vae_loader: components["schemas"]["VAEOutput"]; }; /** - * InvocationProgressEvent - * @description Event model for invocation_progress + * InvocationProgressEvent + * @description Event model for invocation_progress + */ + InvocationProgressEvent: { + /** + * Timestamp + * @description The timestamp of the event + */ + timestamp: number; + /** + * Queue Id + * @description The ID of the queue + */ + queue_id: string; + /** + * Item Id + * @description The ID of the queue item + */ + item_id: number; + /** + * Batch Id + * @description The ID of the queue batch + */ + batch_id: string; + /** + * Origin + * @description The origin of the queue item + * @default null + */ + origin: string | null; + /** + * Destination + * @description The destination of the queue item + * @default null + */ + destination: string | null; + /** + * Session Id + * @description The ID of the session (aka graph 
execution state) + */ + session_id: string; + /** + * Invocation + * @description The ID of the invocation + */ + invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | 
components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | 
components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | 
components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"]; + /** + * Invocation Source Id + * @description The ID of the prepared invocation's source node + */ + invocation_source_id: string; + /** + * Message + * @description A message to display + */ + message: string; + /** + * Percentage + * @description The percentage of the progress (omit to indicate indeterminate progress) + * @default null + */ + percentage: number | null; + /** + * @description An image representing the current state of the progress + * @default null + */ + image: components["schemas"]["ProgressImage"] | null; + }; + /** + * InvocationStartedEvent + * @description Event model for invocation_started + */ + InvocationStartedEvent: { + /** + * Timestamp + * @description The timestamp of the event + */ + timestamp: number; + /** + * Queue Id + * @description The ID of the queue + */ + queue_id: string; + /** + * Item Id + * @description The ID of the queue item + */ + item_id: number; + /** + * Batch Id + * @description The ID of the queue batch + */ + batch_id: string; + /** + * Origin + * @description The origin of the queue item + * @default null + */ + origin: string | null; + /** + * Destination + * @description The destination of the queue item + * @default null + */ + destination: string | null; + /** + * Session Id + * @description The ID of the session (aka graph execution state) + */ + session_id: string; + /** + * Invocation + * @description The ID of the invocation + */ + invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | 
components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | 
components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | 
components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"]; + /** + * Invocation Source Id + * @description The ID of the prepared invocation's source node + */ + invocation_source_id: string; + }; + /** + * InvokeAIAppConfig + * @description Invoke's global app configuration. + * + * Typically, you won't need to interact with this class directly. Instead, use the `get_config` function from `invokeai.app.services.config` to get a singleton config object. + * + * Attributes: + * host: IP address to bind to. Use `0.0.0.0` to serve to your local network. + * port: Port to bind to. + * allow_origins: Allowed CORS origins. + * allow_credentials: Allow CORS credentials. + * allow_methods: Methods allowed for CORS. + * allow_headers: Headers allowed for CORS. + * ssl_certfile: SSL certificate file for HTTPS. See https://www.uvicorn.org/settings/#https. + * ssl_keyfile: SSL key file for HTTPS. See https://www.uvicorn.org/settings/#https. + * log_tokenization: Enable logging of parsed prompt tokens. + * patchmatch: Enable patchmatch inpaint code. + * models_dir: Path to the models directory. 
+ * convert_cache_dir: Path to the converted models cache directory (DEPRECATED, but do not delete because it is needed for migration from previous versions). + * download_cache_dir: Path to the directory that contains dynamically downloaded models. + * legacy_conf_dir: Path to directory of legacy checkpoint config files. + * db_dir: Path to InvokeAI databases directory. + * outputs_dir: Path to directory for outputs. + * custom_nodes_dir: Path to directory for custom nodes. + * style_presets_dir: Path to directory for style presets. + * workflow_thumbnails_dir: Path to directory for workflow thumbnails. + * log_handlers: Log handler. Valid options are "console", "file=<path>", "syslog=path|address:host:port", "http=<url>". + * log_format: Log format. Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style.
Valid values: `plain`, `color`, `syslog`, `legacy` + * log_level: Emit logging messages at this level or higher.
Valid values: `debug`, `info`, `warning`, `error`, `critical` + * log_sql: Log SQL queries. `log_level` must be `debug` for this to do anything. Extremely verbose. + * log_level_network: Log level for network-related messages. 'info' and 'debug' are very verbose.
Valid values: `debug`, `info`, `warning`, `error`, `critical` + * use_memory_db: Use in-memory database. Useful for development. + * dev_reload: Automatically reload when Python sources are changed. Does not reload node definitions. + * profile_graphs: Enable graph profiling using `cProfile`. + * profile_prefix: An optional prefix for profile output files. + * profiles_dir: Path to profiles output directory. + * max_cache_ram_gb: The maximum amount of CPU RAM to use for model caching in GB. If unset, the limit will be configured based on the available RAM. In most cases, it is recommended to leave this unset. + * max_cache_vram_gb: The amount of VRAM to use for model caching in GB. If unset, the limit will be configured based on the available VRAM and the device_working_mem_gb. In most cases, it is recommended to leave this unset. + * log_memory_usage: If True, a memory snapshot will be captured before and after every model cache operation, and the result will be logged (at debug level). There is a time cost to capturing the memory snapshots, so it is recommended to only enable this feature if you are actively inspecting the model cache's behaviour. + * device_working_mem_gb: The amount of working memory to keep available on the compute device (in GB). Has no effect if running on CPU. If you are experiencing OOM errors, try increasing this value. + * enable_partial_loading: Enable partial loading of models. This enables models to run with reduced VRAM requirements (at the cost of slower speed) by streaming the model from RAM to VRAM as it is used. In some edge cases, partial loading can cause models to run more slowly if they were previously being fully loaded into VRAM. + * keep_ram_copy_of_weights: Whether to keep a full RAM copy of a model's weights when the model is loaded in VRAM. Keeping a RAM copy increases average RAM usage, but speeds up model switching and LoRA patching (assuming there is sufficient RAM). Set this to False if RAM pressure is consistently high. + * ram: DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_ram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable. + * vram: DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_vram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable. + * lazy_offload: DEPRECATED: This setting is no longer used. Lazy-offloading is enabled by default. This config setting will be removed once the new model cache behavior is stable. + * pytorch_cuda_alloc_conf: Configure the Torch CUDA memory allocator. This will impact peak reserved VRAM usage and performance. Setting to "backend:cudaMallocAsync" works well on many systems. The optimal configuration is highly dependent on the system configuration (device type, VRAM, CUDA driver version, etc.), so must be tuned experimentally. + * device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.
Valid values: `auto`, `cpu`, `cuda`, `mps`, `cuda:N` (where N is a device number) + * precision: Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.
Valid values: `auto`, `float16`, `bfloat16`, `float32` + * sequential_guidance: Whether to calculate guidance in serial instead of in parallel, lowering memory requirements. + * attention_type: Attention type.
Valid values: `auto`, `normal`, `xformers`, `sliced`, `torch-sdp` + * attention_slice_size: Slice size, valid when attention_type=="sliced".
Valid values: `auto`, `balanced`, `max`, `1`, `2`, `3`, `4`, `5`, `6`, `7`, `8` + * force_tiled_decode: Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty). + * pil_compress_level: The compress_level setting of PIL.Image.save(), used for PNG encoding. All settings are lossless. 0 = no compression, 1 = fastest with slightly larger filesize, 9 = slowest with smallest filesize. 1 is typically the best setting. + * max_queue_size: Maximum number of items in the session queue. + * clear_queue_on_startup: Empties session queue on startup. + * allow_nodes: List of nodes to allow. Omit to allow all. + * deny_nodes: List of nodes to deny. Omit to deny none. + * node_cache_size: How many cached nodes to keep in memory. + * hashing_algorithm: Model hashing algorithm for model installs. 'blake3_multi' is best for SSDs. 'blake3_single' is best for spinning disk HDDs. 'random' disables hashing, instead assigning a UUID to models. Useful when using a memory db to reduce model installation time, or if you don't care about storing stable hashes for models. Alternatively, any other hashlib algorithm is accepted, though these are not nearly as performant as blake3.
Valid values: `blake3_multi`, `blake3_single`, `random`, `md5`, `sha1`, `sha224`, `sha256`, `sha384`, `sha512`, `blake2b`, `blake2s`, `sha3_224`, `sha3_256`, `sha3_384`, `sha3_512`, `shake_128`, `shake_256` + * remote_api_tokens: List of regular expression and token pairs used when downloading models from URLs. The download URL is tested against the regex, and if it matches, the token is provided as a Bearer token. + * scan_models_on_startup: Scan the models directory on startup, registering orphaned models. This is typically only used in conjunction with `use_memory_db` for testing purposes. + * unsafe_disable_picklescan: UNSAFE. Disable the picklescan security check during model installation. Recommended only for development and testing purposes. This will allow arbitrary code execution during model installation, so should never be used in production. + * allow_unknown_models: Allow installation of models that we are unable to identify. If enabled, models will be marked as `unknown` in the database, and will not have any metadata associated with them. If disabled, unknown models will be rejected during installation. + */ + InvokeAIAppConfig: { + /** + * Schema Version + * @description Schema version of the config file. This is not a user-configurable setting. + * @default 4.0.2 + */ + schema_version?: string; + /** + * Legacy Models Yaml Path + * @description Path to the legacy models.yaml file. This is not a user-configurable setting. + */ + legacy_models_yaml_path?: string | null; + /** + * Host + * @description IP address to bind to. Use `0.0.0.0` to serve to your local network. + * @default 127.0.0.1 + */ + host?: string; + /** + * Port + * @description Port to bind to. + * @default 9090 + */ + port?: number; + /** + * Allow Origins + * @description Allowed CORS origins. + * @default [] + */ + allow_origins?: string[]; + /** + * Allow Credentials + * @description Allow CORS credentials. + * @default true + */ + allow_credentials?: boolean; + /** + * Allow Methods + * @description Methods allowed for CORS. + * @default [ + * "*" + * ] + */ + allow_methods?: string[]; + /** + * Allow Headers + * @description Headers allowed for CORS. + * @default [ + * "*" + * ] + */ + allow_headers?: string[]; + /** + * Ssl Certfile + * @description SSL certificate file for HTTPS. See https://www.uvicorn.org/settings/#https. + */ + ssl_certfile?: string | null; + /** + * Ssl Keyfile + * @description SSL key file for HTTPS. See https://www.uvicorn.org/settings/#https. + */ + ssl_keyfile?: string | null; + /** + * Log Tokenization + * @description Enable logging of parsed prompt tokens. + * @default false + */ + log_tokenization?: boolean; + /** + * Patchmatch + * @description Enable patchmatch inpaint code. + * @default true + */ + patchmatch?: boolean; + /** + * Models Dir + * Format: path + * @description Path to the models directory. + * @default models + */ + models_dir?: string; + /** + * Convert Cache Dir + * Format: path + * @description Path to the converted models cache directory (DEPRECATED, but do not delete because it is needed for migration from previous versions). + * @default models/.convert_cache + */ + convert_cache_dir?: string; + /** + * Download Cache Dir + * Format: path + * @description Path to the directory that contains dynamically downloaded models. + * @default models/.download_cache + */ + download_cache_dir?: string; + /** + * Legacy Conf Dir + * Format: path + * @description Path to directory of legacy checkpoint config files. 
+ * @default configs + */ + legacy_conf_dir?: string; + /** + * Db Dir + * Format: path + * @description Path to InvokeAI databases directory. + * @default databases + */ + db_dir?: string; + /** + * Outputs Dir + * Format: path + * @description Path to directory for outputs. + * @default outputs + */ + outputs_dir?: string; + /** + * Custom Nodes Dir + * Format: path + * @description Path to directory for custom nodes. + * @default nodes + */ + custom_nodes_dir?: string; + /** + * Style Presets Dir + * Format: path + * @description Path to directory for style presets. + * @default style_presets + */ + style_presets_dir?: string; + /** + * Workflow Thumbnails Dir + * Format: path + * @description Path to directory for workflow thumbnails. + * @default workflow_thumbnails + */ + workflow_thumbnails_dir?: string; + /** + * Log Handlers + * @description Log handler. Valid options are "console", "file=<path>", "syslog=path|address:host:port", "http=<url>". + * @default [ + * "console" + * ] + */ + log_handlers?: string[]; + /** + * Log Format + * @description Log format. Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style. + * @default color + * @enum {string} + */ + log_format?: "plain" | "color" | "syslog" | "legacy"; + /** + * Log Level + * @description Emit logging messages at this level or higher. + * @default info + * @enum {string} + */ + log_level?: "debug" | "info" | "warning" | "error" | "critical"; + /** + * Log Sql + * @description Log SQL queries. `log_level` must be `debug` for this to do anything. Extremely verbose. + * @default false + */ + log_sql?: boolean; + /** + * Log Level Network + * @description Log level for network-related messages. 'info' and 'debug' are very verbose. + * @default warning + * @enum {string} + */ + log_level_network?: "debug" | "info" | "warning" | "error" | "critical"; + /** + * Use Memory Db + * @description Use in-memory database. Useful for development. + * @default false + */ + use_memory_db?: boolean; + /** + * Dev Reload + * @description Automatically reload when Python sources are changed. Does not reload node definitions. + * @default false + */ + dev_reload?: boolean; + /** + * Profile Graphs + * @description Enable graph profiling using `cProfile`. + * @default false + */ + profile_graphs?: boolean; + /** + * Profile Prefix + * @description An optional prefix for profile output files. + */ + profile_prefix?: string | null; + /** + * Profiles Dir + * Format: path + * @description Path to profiles output directory. + * @default profiles + */ + profiles_dir?: string; + /** + * Max Cache Ram Gb + * @description The maximum amount of CPU RAM to use for model caching in GB. If unset, the limit will be configured based on the available RAM. In most cases, it is recommended to leave this unset. + */ + max_cache_ram_gb?: number | null; + /** + * Max Cache Vram Gb + * @description The amount of VRAM to use for model caching in GB. If unset, the limit will be configured based on the available VRAM and the device_working_mem_gb. In most cases, it is recommended to leave this unset. + */ + max_cache_vram_gb?: number | null; + /** + * Log Memory Usage + * @description If True, a memory snapshot will be captured before and after every model cache operation, and the result will be logged (at debug level). There is a time cost to capturing the memory snapshots, so it is recommended to only enable this feature if you are actively inspecting the model cache's behaviour. 
+ * @default false + */ + log_memory_usage?: boolean; + /** + * Device Working Mem Gb + * @description The amount of working memory to keep available on the compute device (in GB). Has no effect if running on CPU. If you are experiencing OOM errors, try increasing this value. + * @default 3 + */ + device_working_mem_gb?: number; + /** + * Enable Partial Loading + * @description Enable partial loading of models. This enables models to run with reduced VRAM requirements (at the cost of slower speed) by streaming the model from RAM to VRAM as it is used. In some edge cases, partial loading can cause models to run more slowly if they were previously being fully loaded into VRAM. + * @default false + */ + enable_partial_loading?: boolean; + /** + * Keep Ram Copy Of Weights + * @description Whether to keep a full RAM copy of a model's weights when the model is loaded in VRAM. Keeping a RAM copy increases average RAM usage, but speeds up model switching and LoRA patching (assuming there is sufficient RAM). Set this to False if RAM pressure is consistently high. + * @default true + */ + keep_ram_copy_of_weights?: boolean; + /** + * Ram + * @description DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_ram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable. + */ + ram?: number | null; + /** + * Vram + * @description DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_vram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable. + */ + vram?: number | null; + /** + * Lazy Offload + * @description DEPRECATED: This setting is no longer used. Lazy-offloading is enabled by default. This config setting will be removed once the new model cache behavior is stable. + * @default true + */ + lazy_offload?: boolean; + /** + * Pytorch Cuda Alloc Conf + * @description Configure the Torch CUDA memory allocator. This will impact peak reserved VRAM usage and performance. Setting to "backend:cudaMallocAsync" works well on many systems. The optimal configuration is highly dependent on the system configuration (device type, VRAM, CUDA driver version, etc.), so must be tuned experimentally. + */ + pytorch_cuda_alloc_conf?: string | null; + /** + * Device + * @description Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.
Valid values: `auto`, `cpu`, `cuda`, `mps`, `cuda:N` (where N is a device number) + * @default auto + */ + device?: string; + /** + * Precision + * @description Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system. + * @default auto + * @enum {string} + */ + precision?: "auto" | "float16" | "bfloat16" | "float32"; + /** + * Sequential Guidance + * @description Whether to calculate guidance in serial instead of in parallel, lowering memory requirements. + * @default false + */ + sequential_guidance?: boolean; + /** + * Attention Type + * @description Attention type. + * @default auto + * @enum {string} + */ + attention_type?: "auto" | "normal" | "xformers" | "sliced" | "torch-sdp"; + /** + * Attention Slice Size + * @description Slice size, valid when attention_type=="sliced". + * @default auto + * @enum {unknown} + */ + attention_slice_size?: "auto" | "balanced" | "max" | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8; + /** + * Force Tiled Decode + * @description Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty). + * @default false + */ + force_tiled_decode?: boolean; + /** + * Pil Compress Level + * @description The compress_level setting of PIL.Image.save(), used for PNG encoding. All settings are lossless. 0 = no compression, 1 = fastest with slightly larger filesize, 9 = slowest with smallest filesize. 1 is typically the best setting. + * @default 1 + */ + pil_compress_level?: number; + /** + * Max Queue Size + * @description Maximum number of items in the session queue. + * @default 10000 + */ + max_queue_size?: number; + /** + * Clear Queue On Startup + * @description Empties session queue on startup. + * @default false + */ + clear_queue_on_startup?: boolean; + /** + * Allow Nodes + * @description List of nodes to allow. Omit to allow all. + */ + allow_nodes?: string[] | null; + /** + * Deny Nodes + * @description List of nodes to deny. Omit to deny none. + */ + deny_nodes?: string[] | null; + /** + * Node Cache Size + * @description How many cached nodes to keep in memory. + * @default 512 + */ + node_cache_size?: number; + /** + * Hashing Algorithm + * @description Model hashing algorithm for model installs. 'blake3_multi' is best for SSDs. 'blake3_single' is best for spinning disk HDDs. 'random' disables hashing, instead assigning a UUID to models. Useful when using a memory db to reduce model installation time, or if you don't care about storing stable hashes for models. Alternatively, any other hashlib algorithm is accepted, though these are not nearly as performant as blake3. + * @default blake3_single + * @enum {string} + */ + hashing_algorithm?: "blake3_multi" | "blake3_single" | "random" | "md5" | "sha1" | "sha224" | "sha256" | "sha384" | "sha512" | "blake2b" | "blake2s" | "sha3_224" | "sha3_256" | "sha3_384" | "sha3_512" | "shake_128" | "shake_256"; + /** + * Remote Api Tokens + * @description List of regular expression and token pairs used when downloading models from URLs. The download URL is tested against the regex, and if it matches, the token is provided as a Bearer token. + */ + remote_api_tokens?: components["schemas"]["URLRegexTokenPair"][] | null; + /** + * Scan Models On Startup + * @description Scan the models directory on startup, registering orphaned models. This is typically only used in conjunction with `use_memory_db` for testing purposes. 
+ * @default false + */ + scan_models_on_startup?: boolean; + /** + * Unsafe Disable Picklescan + * @description UNSAFE. Disable the picklescan security check during model installation. Recommended only for development and testing purposes. This will allow arbitrary code execution during model installation, so should never be used in production. + * @default false + */ + unsafe_disable_picklescan?: boolean; + /** + * Allow Unknown Models + * @description Allow installation of models that we are unable to identify. If enabled, models will be marked as `unknown` in the database, and will not have any metadata associated with them. If disabled, unknown models will be rejected during installation. + * @default true + */ + allow_unknown_models?: boolean; + }; + /** + * InvokeAIAppConfigWithSetFields + * @description InvokeAI App Config with model fields set + */ + InvokeAIAppConfigWithSetFields: { + /** + * Set Fields + * @description The set fields + */ + set_fields: string[]; + /** @description The InvokeAI App Config */ + config: components["schemas"]["InvokeAIAppConfig"]; + }; + /** + * Adjust Image Hue Plus + * @description Adjusts the Hue of an image by rotating it in the selected color space. Originally created by @dwringer + */ + InvokeAdjustImageHuePlusInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * @description The image to adjust + * @default null + */ + image?: components["schemas"]["ImageField"] | null; + /** + * Space + * @description Color space in which to rotate hue by polar coords (*: non-invertible) + * @default HSV / HSL / RGB + * @enum {string} + */ + space?: "HSV / HSL / RGB" | "Okhsl" | "Okhsv" | "*Oklch / Oklab" | "*LCh / CIELab" | "*UPLab (w/CIELab_to_UPLab.icc)"; + /** + * Degrees + * @description Degrees by which to rotate image hue + * @default 0 + */ + degrees?: number; + /** + * Preserve Lightness + * @description Whether to preserve CIELAB lightness values + * @default false + */ + preserve_lightness?: boolean; + /** + * Ok Adaptive Gamut + * @description Higher preserves chroma at the expense of lightness (Oklab) + * @default 0.05 + */ + ok_adaptive_gamut?: number; + /** + * Ok High Precision + * @description Use more steps in computing gamut (Oklab/Okhsv/Okhsl) + * @default true + */ + ok_high_precision?: boolean; + /** + * type + * @default invokeai_img_hue_adjust_plus + * @constant + */ + type: "invokeai_img_hue_adjust_plus"; + }; + /** + * Equivalent Achromatic Lightness + * @description Calculate Equivalent Achromatic Lightness from image. 
Originally created by @dwringer + */ + InvokeEquivalentAchromaticLightnessInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * @description Image from which to get channel + * @default null + */ + image?: components["schemas"]["ImageField"] | null; + /** + * type + * @default invokeai_ealightness + * @constant + */ + type: "invokeai_ealightness"; + }; + /** + * Image Layer Blend + * @description Blend two images together, with optional opacity, mask, and blend modes. Originally created by @dwringer + */ + InvokeImageBlendInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * @description The top image to blend + * @default null + */ + layer_upper?: components["schemas"]["ImageField"] | null; + /** + * Blend Mode + * @description Available blend modes + * @default Normal + * @enum {string} + */ + blend_mode?: "Normal" | "Lighten Only" | "Darken Only" | "Lighten Only (EAL)" | "Darken Only (EAL)" | "Hue" | "Saturation" | "Color" | "Luminosity" | "Linear Dodge (Add)" | "Subtract" | "Multiply" | "Divide" | "Screen" | "Overlay" | "Linear Burn" | "Difference" | "Hard Light" | "Soft Light" | "Vivid Light" | "Linear Light" | "Color Burn" | "Color Dodge"; + /** + * Opacity + * @description Desired opacity of the upper layer + * @default 1 + */ + opacity?: number; + /** + * @description Optional mask, used to restrict areas from blending + * @default null + */ + mask?: components["schemas"]["ImageField"] | null; + /** + * Fit To Width + * @description Scale upper layer to fit base width + * @default false + */ + fit_to_width?: boolean; + /** + * Fit To Height + * @description Scale upper layer to fit base height + * @default true + */ + fit_to_height?: boolean; + /** + * @description The bottom image to blend + * @default null + */ + layer_base?: components["schemas"]["ImageField"] | null; + /** + * Color Space + * @description Available color spaces for blend computations + * @default RGB + * @enum {string} + */ + color_space?: "RGB" | "Linear RGB" | "HSL (RGB)" | "HSV (RGB)" | "Okhsl" | "Okhsv" | "Oklch (Oklab)" | "LCh (CIELab)"; + /** + * Adaptive Gamut + * @description Adaptive gamut clipping (0=off). 
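// Example (not part of this diff): a sketch of an InvokeImageBlendInvocation
// node. Enum values must match the schema's string literals exactly,
// including parentheses, e.g. "Oklch (Oklab)". Image names are placeholders.
import type { components } from 'services/api/schema';

const blendNode: components['schemas']['InvokeImageBlendInvocation'] = {
  id: 'blend_1',
  type: 'invokeai_img_blend',
  layer_upper: { image_name: 'upper.png' },
  layer_base: { image_name: 'base.png' },
  blend_mode: 'Soft Light',
  color_space: 'Oklch (Oklab)',
  opacity: 0.6,
};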
Higher prioritizes chroma over lightness + * @default 0 + */ + adaptive_gamut?: number; + /** + * High Precision + * @description Use more steps in computing gamut when possible + * @default true + */ + high_precision?: boolean; + /** + * type + * @default invokeai_img_blend + * @constant + */ + type: "invokeai_img_blend"; + }; + /** + * Image Compositor + * @description Removes backdrop from subject image then overlays subject on background image. Originally created by @dwringer + */ + InvokeImageCompositorInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * @description Image of the subject on a plain monochrome background + * @default null + */ + image_subject?: components["schemas"]["ImageField"] | null; + /** + * @description Image of a background scene + * @default null + */ + image_background?: components["schemas"]["ImageField"] | null; + /** + * Chroma Key + * @description Can be empty for corner flood select, or CSS-3 color or tuple + * @default + */ + chroma_key?: string; + /** + * Threshold + * @description Subject isolation flood-fill threshold + * @default 50 + */ + threshold?: number; + /** + * Fill X + * @description Scale base subject image to fit background width + * @default false + */ + fill_x?: boolean; + /** + * Fill Y + * @description Scale base subject image to fit background height + * @default true + */ + fill_y?: boolean; + /** + * X Offset + * @description x-offset for the subject + * @default 0 + */ + x_offset?: number; + /** + * Y Offset + * @description y-offset for the subject + * @default 0 + */ + y_offset?: number; + /** + * type + * @default invokeai_img_composite + * @constant + */ + type: "invokeai_img_composite"; + }; + /** + * Image Dilate or Erode + * @description Dilate (expand) or erode (contract) an image. Originally created by @dwringer + */ + InvokeImageDilateOrErodeInvocation: { + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. 
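// Example (not part of this diff): a sketch of an
// InvokeImageCompositorInvocation node. Per the schema, chroma_key accepts an
// empty string (corner flood select) or a CSS-3 color; image names are
// placeholders.
import type { components } from 'services/api/schema';

const compositeNode: components['schemas']['InvokeImageCompositorInvocation'] = {
  id: 'composite_1',
  type: 'invokeai_img_composite',
  image_subject: { image_name: 'subject.png' },
  image_background: { image_name: 'scene.png' },
  chroma_key: 'green',
  threshold: 50,
  x_offset: 32,
};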
+ * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * @description The image from which to create a mask + * @default null + */ + image?: components["schemas"]["ImageField"] | null; + /** + * Lightness Only + * @description If true, only applies to image lightness (CIELa*b*) + * @default false + */ + lightness_only?: boolean; + /** + * Radius W + * @description Width (in pixels) by which to dilate (expand) or erode (contract) the image + * @default 4 + */ + radius_w?: number; + /** + * Radius H + * @description Height (in pixels) by which to dilate (expand) or erode (contract) the image + * @default 4 + */ + radius_h?: number; + /** + * Mode + * @description How to operate on the image + * @default Dilate + * @enum {string} + */ + mode?: "Dilate" | "Erode"; + /** + * type + * @default invokeai_img_dilate_erode + * @constant + */ + type: "invokeai_img_dilate_erode"; + }; + /** + * Enhance Image + * @description Applies processing from PIL's ImageEnhance module. Originally created by @dwringer + */ + InvokeImageEnhanceInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * @description The image for which to apply processing + * @default null + */ + image?: components["schemas"]["ImageField"] | null; + /** + * Invert + * @description Whether to invert the image colors + * @default false + */ + invert?: boolean; + /** + * Color + * @description Color enhancement factor + * @default 1 + */ + color?: number; + /** + * Contrast + * @description Contrast enhancement factor + * @default 1 + */ + contrast?: number; + /** + * Brightness + * @description Brightness enhancement factor + * @default 1 + */ + brightness?: number; + /** + * Sharpness + * @description Sharpness enhancement factor + * @default 1 + */ + sharpness?: number; + /** + * type + * @default invokeai_img_enhance + * @constant + */ + type: "invokeai_img_enhance"; + }; + /** + * Image Value Thresholds + * @description Clip image to pure black/white past specified thresholds. Originally created by @dwringer + */ + InvokeImageValueThresholdsInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation.
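// Example (not part of this diff): a sketch of an InvokeImageEnhanceInvocation
// node. Each enhancement factor defaults to 1 (a no-op), so only the changed
// factors need to be set. The image name is a placeholder.
import type { components } from 'services/api/schema';

const enhanceNode: components['schemas']['InvokeImageEnhanceInvocation'] = {
  id: 'enhance_1',
  type: 'invokeai_img_enhance',
  image: { image_name: 'input.png' },
  contrast: 1.2,
  sharpness: 1.5,
};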
+ * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * @description The image from which to create a mask + * @default null + */ + image?: components["schemas"]["ImageField"] | null; + /** + * Invert Output + * @description Make light areas dark and vice versa + * @default false + */ + invert_output?: boolean; + /** + * Renormalize Values + * @description Rescale remaining values from minimum to maximum + * @default false + */ + renormalize_values?: boolean; + /** + * Lightness Only + * @description If true, only applies to image lightness (CIELa*b*) + * @default false + */ + lightness_only?: boolean; + /** + * Threshold Upper + * @description Threshold above which will be set to full value + * @default 0.5 + */ + threshold_upper?: number; + /** + * Threshold Lower + * @description Threshold below which will be set to minimum value + * @default 0.5 + */ + threshold_lower?: number; + /** + * type + * @default invokeai_img_val_thresholds + * @constant + */ + type: "invokeai_img_val_thresholds"; + }; + /** + * ItemIdsResult + * @description Response containing ordered item ids with metadata for optimistic updates. + */ + ItemIdsResult: { + /** + * Item Ids + * @description Ordered list of item ids + */ + item_ids: number[]; + /** + * Total Count + * @description Total number of queue items matching the query + */ + total_count: number; + }; + /** + * IterateInvocation + * @description Iterates over a list of items + */ + IterateInvocation: { + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * Collection + * @description The list of items to iterate over + * @default [] + */ + collection?: unknown[]; + /** + * Index + * @description The index, will be provided on executed iterators + * @default 0 + */ + index?: number; + /** + * type + * @default iterate + * @constant + */ + type: "iterate"; + }; + /** + * IterateInvocationOutput + * @description Used to connect iteration outputs. Will be expanded to a specific output. + */ + IterateInvocationOutput: { + /** + * Collection Item + * @description The item being iterated over + */ + item: unknown; + /** + * Index + * @description The index of the item + */ + index: number; + /** + * Total + * @description The total number of items + */ + total: number; + /** + * type + * @default iterate_output + * @constant + */ + type: "iterate_output"; + }; + JsonValue: unknown; + /** + * LaMa Infill + * @description Infills transparent areas of an image using the LaMa model + */ + LaMaInfillInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. 
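// Example (not part of this diff): a sketch of the iterate node pair. An
// IterateInvocation fans a collection out to downstream nodes; each execution
// yields an IterateInvocationOutput carrying the item plus its index and the
// collection total.
import type { components } from 'services/api/schema';

const iterateNode: components['schemas']['IterateInvocation'] = {
  id: 'iterate_1',
  type: 'iterate',
  collection: ['a photo of a cat', 'a photo of a dog'], // any JSON values
};

const exampleOutput: components['schemas']['IterateInvocationOutput'] = {
  type: 'iterate_output',
  item: 'a photo of a cat',
  index: 0,
  total: 2,
};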
+ * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * @description The image to process + * @default null + */ + image?: components["schemas"]["ImageField"] | null; + /** + * type + * @default infill_lama + * @constant + */ + type: "infill_lama"; + }; + /** + * Latents Collection Primitive + * @description A collection of latents tensor primitive values + */ + LatentsCollectionInvocation: { + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * Collection + * @description The collection of latents tensors + * @default null + */ + collection?: components["schemas"]["LatentsField"][] | null; + /** + * type + * @default latents_collection + * @constant + */ + type: "latents_collection"; + }; + /** + * LatentsCollectionOutput + * @description Base class for nodes that output a collection of latents tensors + */ + LatentsCollectionOutput: { + /** + * Collection + * @description Latents tensor + */ + collection: components["schemas"]["LatentsField"][]; + /** + * type + * @default latents_collection_output + * @constant + */ + type: "latents_collection_output"; + }; + /** + * LatentsField + * @description A latents tensor primitive field + */ + LatentsField: { + /** + * Latents Name + * @description The name of the latents + */ + latents_name: string; + /** + * Seed + * @description Seed used to generate this latents + * @default null + */ + seed?: number | null; + }; + /** + * Latents Primitive + * @description A latents tensor primitive value + */ + LatentsInvocation: { + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. 
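// Example (not part of this diff): LatentsField is a named handle to a stored
// tensor, not the tensor itself; the optional seed travels with it for
// metadata. The name below is a placeholder.
import type { components } from 'services/api/schema';

const latentsHandle: components['schemas']['LatentsField'] = {
  latents_name: 'latents-abc123',
  seed: 1234567890,
};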
+ * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * @description The latents tensor + * @default null + */ + latents?: components["schemas"]["LatentsField"] | null; + /** + * type + * @default latents + * @constant + */ + type: "latents"; + }; + /** + * LatentsMetaOutput + * @description Latents + metadata + */ + LatentsMetaOutput: { + /** @description Metadata Dict */ + metadata: components["schemas"]["MetadataField"]; + /** + * type + * @default latents_meta_output + * @constant + */ + type: "latents_meta_output"; + /** @description Latents tensor */ + latents: components["schemas"]["LatentsField"]; + /** + * Width + * @description Width of output (px) + */ + width: number; + /** + * Height + * @description Height of output (px) + */ + height: number; + }; + /** + * LatentsOutput + * @description Base class for nodes that output a single latents tensor + */ + LatentsOutput: { + /** @description Latents tensor */ + latents: components["schemas"]["LatentsField"]; + /** + * Width + * @description Width of output (px) + */ + width: number; + /** + * Height + * @description Height of output (px) + */ + height: number; + /** + * type + * @default latents_output + * @constant + */ + type: "latents_output"; + }; + /** + * Latents to Image - SD1.5, SDXL + * @description Generates an image from latents. + */ + LatentsToImageInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * @description Latents tensor + * @default null + */ + latents?: components["schemas"]["LatentsField"] | null; + /** + * @description VAE + * @default null + */ + vae?: components["schemas"]["VAEField"] | null; + /** + * Tiled + * @description Processing using overlapping tiles (reduces memory consumption) + * @default false + */ + tiled?: boolean; + /** + * Tile Size + * @description The tile size for VAE tiling in pixels (image space). If set to 0, the default tile size for the model will be used. Larger tile sizes generally produce better results at the cost of higher memory usage. + * @default 0 + */ + tile_size?: number; + /** + * Fp32 + * @description Whether or not to use full float32 precision + * @default false + */ + fp32?: boolean; + /** + * type + * @default l2i + * @constant + */ + type: "l2i"; + }; + /** + * Lineart Anime Edge Detection + * @description Generates an edge map using the Lineart Anime model. + */ + LineartAnimeEdgeDetectionInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
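// Example (not part of this diff): a sketch of a LatentsToImageInvocation
// ("l2i") node with tiled decode enabled; tile_size 0 falls back to the
// model's default tile size per the description above. The latents handle is
// a placeholder, and the VAE field would normally be connected from a model
// loader node rather than set inline.
import type { components } from 'services/api/schema';

const l2iNode: components['schemas']['LatentsToImageInvocation'] = {
  id: 'l2i_1',
  type: 'l2i',
  latents: { latents_name: 'latents-abc123' },
  tiled: true,
  tile_size: 0,
  fp32: false,
};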
+ */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * @description The image to process + * @default null + */ + image?: components["schemas"]["ImageField"] | null; + /** + * type + * @default lineart_anime_edge_detection + * @constant + */ + type: "lineart_anime_edge_detection"; + }; + /** + * Lineart Edge Detection + * @description Generates an edge map using the Lineart model. + */ + LineartEdgeDetectionInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * @description The image to process + * @default null + */ + image?: components["schemas"]["ImageField"] | null; + /** + * Coarse + * @description Whether to use coarse mode + * @default false + */ + coarse?: boolean; + /** + * type + * @default lineart_edge_detection + * @constant + */ + type: "lineart_edge_detection"; + }; + /** + * LLaVA OneVision VLLM + * @description Run a LLaVA OneVision VLLM model. + */ + LlavaOnevisionVllmInvocation: { + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * Images + * @description Input image. + * @default null + */ + images?: (components["schemas"]["ImageField"][] | components["schemas"]["ImageField"]) | null; + /** + * Prompt + * @description Input text prompt. + * @default + */ + prompt?: string; + /** + * LLaVA Model Type + * @description The VLLM model to use + * @default null + */ + vllm_model?: components["schemas"]["ModelIdentifierField"] | null; + /** + * type + * @default llava_onevision_vllm + * @constant + */ + type: "llava_onevision_vllm"; + }; + /** + * LlavaOnevision_Diffusers_Config + * @description Model config for Llava Onevision models. + */ + LlavaOnevision_Diffusers_Config: { + /** + * Key + * @description A unique key for this model. + */ + key: string; + /** + * Hash + * @description The hash of the model file(s). + */ + hash: string; + /** + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. + */ + path: string; + /** + * File Size + * @description The size of the model in bytes. + */ + file_size: number; + /** + * Name + * @description Name of the model. + */ + name: string; + /** + * Description + * @description Model description + */ + description: string | null; + /** + * Source + * @description The original source of the model (path, URL or repo_id). 
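// Example (not part of this diff): a sketch of a LlavaOnevisionVllmInvocation
// node. Per the union above, `images` accepts a single ImageField or an
// array; image names are placeholders, and vllm_model would come from the
// model manager API.
import type { components } from 'services/api/schema';

const vlmNode: components['schemas']['LlavaOnevisionVllmInvocation'] = {
  id: 'llava_1',
  type: 'llava_onevision_vllm',
  images: [{ image_name: 'frame_0.png' }, { image_name: 'frame_1.png' }],
  prompt: 'Describe the difference between these two images.',
};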
+ */ + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; + /** + * Source Api Response + * @description The original API response from the source, as stringified JSON. + */ + source_api_response: string | null; + /** + * Cover Image + * @description Url for image to preview model + */ + cover_image: string | null; + /** + * Usage Info + * @description Usage information for this model + */ + usage_info: string | null; + /** + * Format + * @default diffusers + * @constant + */ + format: "diffusers"; + /** @default */ + repo_variant: components["schemas"]["ModelRepoVariant"]; + /** + * Type + * @default llava_onevision + * @constant + */ + type: "llava_onevision"; + /** + * Base + * @default any + * @constant + */ + base: "any"; + }; + /** + * Apply LoRA Collection - SD1.5 + * @description Applies a collection of LoRAs to the provided UNet and CLIP models. + */ + LoRACollectionLoader: { + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * LoRAs + * @description LoRA models and weights. May be a single LoRA or collection. + * @default null + */ + loras?: components["schemas"]["LoRAField"] | components["schemas"]["LoRAField"][] | null; + /** + * UNet + * @description UNet (scheduler, LoRAs) + * @default null + */ + unet?: components["schemas"]["UNetField"] | null; + /** + * CLIP + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null + */ + clip?: components["schemas"]["CLIPField"] | null; + /** + * type + * @default lora_collection_loader + * @constant + */ + type: "lora_collection_loader"; + }; + /** LoRAField */ + LoRAField: { + /** @description Info to load lora model */ + lora: components["schemas"]["ModelIdentifierField"]; + /** + * Weight + * @description Weight to apply to lora model + */ + weight: number; + }; + /** + * Apply LoRA - SD1.5 + * @description Apply selected lora to unet and text_encoder. + */ + LoRALoaderInvocation: { + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. 
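// Example (not part of this diff): LoRAField pairs a model identifier with a
// weight, and LoRACollectionLoader accepts either one field or an array of
// them. ModelIdentifierField values normally come from the model records API,
// so one is only declared here rather than fabricated.
import type { components } from 'services/api/schema';

declare const detailLora: components['schemas']['ModelIdentifierField'];

const loras: components['schemas']['LoRAField'][] = [
  { lora: detailLora, weight: 0.75 },
];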
+ * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * LoRA + * @description LoRA model to load + * @default null + */ + lora?: components["schemas"]["ModelIdentifierField"] | null; + /** + * Weight + * @description The weight at which the LoRA is applied to each model + * @default 0.75 + */ + weight?: number; + /** + * UNet + * @description UNet (scheduler, LoRAs) + * @default null + */ + unet?: components["schemas"]["UNetField"] | null; + /** + * CLIP + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null + */ + clip?: components["schemas"]["CLIPField"] | null; + /** + * type + * @default lora_loader + * @constant + */ + type: "lora_loader"; + }; + /** + * LoRALoaderOutput + * @description Model loader output + */ + LoRALoaderOutput: { + /** + * UNet + * @description UNet (scheduler, LoRAs) + * @default null + */ + unet: components["schemas"]["UNetField"] | null; + /** + * CLIP + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null + */ + clip: components["schemas"]["CLIPField"] | null; + /** + * type + * @default lora_loader_output + * @constant + */ + type: "lora_loader_output"; + }; + /** + * LoRAMetadataField + * @description LoRA Metadata Field + */ + LoRAMetadataField: { + /** @description LoRA model to load */ + model: components["schemas"]["ModelIdentifierField"]; + /** + * Weight + * @description The weight at which the LoRA is applied to each model + */ + weight: number; + }; + /** + * Select LoRA + * @description Selects a LoRA model and weight. + */ + LoRASelectorInvocation: { + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * LoRA + * @description LoRA model to load + * @default null + */ + lora?: components["schemas"]["ModelIdentifierField"] | null; + /** + * Weight + * @description The weight at which the LoRA is applied to each model + * @default 0.75 + */ + weight?: number; + /** + * type + * @default lora_selector + * @constant + */ + type: "lora_selector"; + }; + /** + * LoRASelectorOutput + * @description Model loader output + */ + LoRASelectorOutput: { + /** + * LoRA + * @description LoRA model and weight + */ + lora: components["schemas"]["LoRAField"]; + /** + * type + * @default lora_selector_output + * @constant + */ + type: "lora_selector_output"; + }; + /** LoRA_Diffusers_FLUX_Config */ + LoRA_Diffusers_FLUX_Config: { + /** + * Key + * @description A unique key for this model. + */ + key: string; + /** + * Hash + * @description The hash of the model file(s). + */ + hash: string; + /** + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. + */ + path: string; + /** + * File Size + * @description The size of the model in bytes. + */ + file_size: number; + /** + * Name + * @description Name of the model. + */ + name: string; + /** + * Description + * @description Model description + */ + description: string | null; + /** + * Source + * @description The original source of the model (path, URL or repo_id). 
+ */ + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; + /** + * Source Api Response + * @description The original API response from the source, as stringified JSON. + */ + source_api_response: string | null; + /** + * Cover Image + * @description Url for image to preview model + */ + cover_image: string | null; + /** + * Usage Info + * @description Usage information for this model + */ + usage_info: string | null; + /** + * Type + * @default lora + * @constant + */ + type: "lora"; + /** + * Trigger Phrases + * @description Set of trigger phrases for this model + */ + trigger_phrases: string[] | null; + /** @description Default settings for this model */ + default_settings: components["schemas"]["LoraModelDefaultSettings"] | null; + /** + * Format + * @default diffusers + * @constant + */ + format: "diffusers"; + /** + * Base + * @default flux + * @constant + */ + base: "flux"; + }; + /** LoRA_Diffusers_SD1_Config */ + LoRA_Diffusers_SD1_Config: { + /** + * Key + * @description A unique key for this model. + */ + key: string; + /** + * Hash + * @description The hash of the model file(s). + */ + hash: string; + /** + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. + */ + path: string; + /** + * File Size + * @description The size of the model in bytes. + */ + file_size: number; + /** + * Name + * @description Name of the model. + */ + name: string; + /** + * Description + * @description Model description + */ + description: string | null; + /** + * Source + * @description The original source of the model (path, URL or repo_id). + */ + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; + /** + * Source Api Response + * @description The original API response from the source, as stringified JSON. + */ + source_api_response: string | null; + /** + * Cover Image + * @description Url for image to preview model + */ + cover_image: string | null; + /** + * Usage Info + * @description Usage information for this model + */ + usage_info: string | null; + /** + * Type + * @default lora + * @constant + */ + type: "lora"; + /** + * Trigger Phrases + * @description Set of trigger phrases for this model + */ + trigger_phrases: string[] | null; + /** @description Default settings for this model */ + default_settings: components["schemas"]["LoraModelDefaultSettings"] | null; + /** + * Format + * @default diffusers + * @constant + */ + format: "diffusers"; + /** + * Base + * @default sd-1 + * @constant + */ + base: "sd-1"; + }; + /** LoRA_Diffusers_SD2_Config */ + LoRA_Diffusers_SD2_Config: { + /** + * Key + * @description A unique key for this model. + */ + key: string; + /** + * Hash + * @description The hash of the model file(s). + */ + hash: string; + /** + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. + */ + path: string; + /** + * File Size + * @description The size of the model in bytes. + */ + file_size: number; + /** + * Name + * @description Name of the model. + */ + name: string; + /** + * Description + * @description Model description + */ + description: string | null; + /** + * Source + * @description The original source of the model (path, URL or repo_id). 
+ */ + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; + /** + * Source Api Response + * @description The original API response from the source, as stringified JSON. + */ + source_api_response: string | null; + /** + * Cover Image + * @description Url for image to preview model + */ + cover_image: string | null; + /** + * Usage Info + * @description Usage information for this model + */ + usage_info: string | null; + /** + * Type + * @default lora + * @constant + */ + type: "lora"; + /** + * Trigger Phrases + * @description Set of trigger phrases for this model + */ + trigger_phrases: string[] | null; + /** @description Default settings for this model */ + default_settings: components["schemas"]["LoraModelDefaultSettings"] | null; + /** + * Format + * @default diffusers + * @constant + */ + format: "diffusers"; + /** + * Base + * @default sd-2 + * @constant + */ + base: "sd-2"; + }; + /** LoRA_Diffusers_SDXL_Config */ + LoRA_Diffusers_SDXL_Config: { + /** + * Key + * @description A unique key for this model. + */ + key: string; + /** + * Hash + * @description The hash of the model file(s). + */ + hash: string; + /** + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. + */ + path: string; + /** + * File Size + * @description The size of the model in bytes. + */ + file_size: number; + /** + * Name + * @description Name of the model. + */ + name: string; + /** + * Description + * @description Model description + */ + description: string | null; + /** + * Source + * @description The original source of the model (path, URL or repo_id). + */ + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; + /** + * Source Api Response + * @description The original API response from the source, as stringified JSON. + */ + source_api_response: string | null; + /** + * Cover Image + * @description Url for image to preview model + */ + cover_image: string | null; + /** + * Usage Info + * @description Usage information for this model + */ + usage_info: string | null; + /** + * Type + * @default lora + * @constant + */ + type: "lora"; + /** + * Trigger Phrases + * @description Set of trigger phrases for this model + */ + trigger_phrases: string[] | null; + /** @description Default settings for this model */ + default_settings: components["schemas"]["LoraModelDefaultSettings"] | null; + /** + * Format + * @default diffusers + * @constant + */ + format: "diffusers"; + /** + * Base + * @default sdxl + * @constant + */ + base: "sdxl"; + }; + /** LoRA_LyCORIS_FLUX_Config */ + LoRA_LyCORIS_FLUX_Config: { + /** + * Key + * @description A unique key for this model. + */ + key: string; + /** + * Hash + * @description The hash of the model file(s). + */ + hash: string; + /** + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. + */ + path: string; + /** + * File Size + * @description The size of the model in bytes. + */ + file_size: number; + /** + * Name + * @description Name of the model. + */ + name: string; + /** + * Description + * @description Model description + */ + description: string | null; + /** + * Source + * @description The original source of the model (path, URL or repo_id). 
+ */ + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; + /** + * Source Api Response + * @description The original API response from the source, as stringified JSON. + */ + source_api_response: string | null; + /** + * Cover Image + * @description Url for image to preview model + */ + cover_image: string | null; + /** + * Usage Info + * @description Usage information for this model + */ + usage_info: string | null; + /** + * Type + * @default lora + * @constant + */ + type: "lora"; + /** + * Trigger Phrases + * @description Set of trigger phrases for this model + */ + trigger_phrases: string[] | null; + /** @description Default settings for this model */ + default_settings: components["schemas"]["LoraModelDefaultSettings"] | null; + /** + * Format + * @default lycoris + * @constant + */ + format: "lycoris"; + /** + * Base + * @default flux + * @constant + */ + base: "flux"; + }; + /** LoRA_LyCORIS_SD1_Config */ + LoRA_LyCORIS_SD1_Config: { + /** + * Key + * @description A unique key for this model. + */ + key: string; + /** + * Hash + * @description The hash of the model file(s). + */ + hash: string; + /** + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. + */ + path: string; + /** + * File Size + * @description The size of the model in bytes. + */ + file_size: number; + /** + * Name + * @description Name of the model. + */ + name: string; + /** + * Description + * @description Model description + */ + description: string | null; + /** + * Source + * @description The original source of the model (path, URL or repo_id). + */ + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; + /** + * Source Api Response + * @description The original API response from the source, as stringified JSON. + */ + source_api_response: string | null; + /** + * Cover Image + * @description Url for image to preview model + */ + cover_image: string | null; + /** + * Usage Info + * @description Usage information for this model + */ + usage_info: string | null; + /** + * Type + * @default lora + * @constant + */ + type: "lora"; + /** + * Trigger Phrases + * @description Set of trigger phrases for this model + */ + trigger_phrases: string[] | null; + /** @description Default settings for this model */ + default_settings: components["schemas"]["LoraModelDefaultSettings"] | null; + /** + * Format + * @default lycoris + * @constant + */ + format: "lycoris"; + /** + * Base + * @default sd-1 + * @constant + */ + base: "sd-1"; + }; + /** LoRA_LyCORIS_SD2_Config */ + LoRA_LyCORIS_SD2_Config: { + /** + * Key + * @description A unique key for this model. + */ + key: string; + /** + * Hash + * @description The hash of the model file(s). + */ + hash: string; + /** + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. + */ + path: string; + /** + * File Size + * @description The size of the model in bytes. + */ + file_size: number; + /** + * Name + * @description Name of the model. + */ + name: string; + /** + * Description + * @description Model description + */ + description: string | null; + /** + * Source + * @description The original source of the model (path, URL or repo_id). 
+ */ + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; + /** + * Source Api Response + * @description The original API response from the source, as stringified JSON. + */ + source_api_response: string | null; + /** + * Cover Image + * @description Url for image to preview model + */ + cover_image: string | null; + /** + * Usage Info + * @description Usage information for this model + */ + usage_info: string | null; + /** + * Type + * @default lora + * @constant + */ + type: "lora"; + /** + * Trigger Phrases + * @description Set of trigger phrases for this model + */ + trigger_phrases: string[] | null; + /** @description Default settings for this model */ + default_settings: components["schemas"]["LoraModelDefaultSettings"] | null; + /** + * Format + * @default lycoris + * @constant + */ + format: "lycoris"; + /** + * Base + * @default sd-2 + * @constant + */ + base: "sd-2"; + }; + /** LoRA_LyCORIS_SDXL_Config */ + LoRA_LyCORIS_SDXL_Config: { + /** + * Key + * @description A unique key for this model. + */ + key: string; + /** + * Hash + * @description The hash of the model file(s). + */ + hash: string; + /** + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. + */ + path: string; + /** + * File Size + * @description The size of the model in bytes. + */ + file_size: number; + /** + * Name + * @description Name of the model. + */ + name: string; + /** + * Description + * @description Model description + */ + description: string | null; + /** + * Source + * @description The original source of the model (path, URL or repo_id). + */ + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; + /** + * Source Api Response + * @description The original API response from the source, as stringified JSON. + */ + source_api_response: string | null; + /** + * Cover Image + * @description Url for image to preview model + */ + cover_image: string | null; + /** + * Usage Info + * @description Usage information for this model + */ + usage_info: string | null; + /** + * Type + * @default lora + * @constant + */ + type: "lora"; + /** + * Trigger Phrases + * @description Set of trigger phrases for this model + */ + trigger_phrases: string[] | null; + /** @description Default settings for this model */ + default_settings: components["schemas"]["LoraModelDefaultSettings"] | null; + /** + * Format + * @default lycoris + * @constant + */ + format: "lycoris"; + /** + * Base + * @default sdxl + * @constant + */ + base: "sdxl"; + }; + /** LoRA_OMI_FLUX_Config */ + LoRA_OMI_FLUX_Config: { + /** + * Key + * @description A unique key for this model. + */ + key: string; + /** + * Hash + * @description The hash of the model file(s). + */ + hash: string; + /** + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. + */ + path: string; + /** + * File Size + * @description The size of the model in bytes. + */ + file_size: number; + /** + * Name + * @description Name of the model. + */ + name: string; + /** + * Description + * @description Model description + */ + description: string | null; + /** + * Source + * @description The original source of the model (path, URL or repo_id). 
+ */ + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; + /** + * Source Api Response + * @description The original API response from the source, as stringified JSON. + */ + source_api_response: string | null; + /** + * Cover Image + * @description Url for image to preview model + */ + cover_image: string | null; + /** + * Usage Info + * @description Usage information for this model + */ + usage_info: string | null; + /** + * Type + * @default lora + * @constant + */ + type: "lora"; + /** + * Trigger Phrases + * @description Set of trigger phrases for this model + */ + trigger_phrases: string[] | null; + /** @description Default settings for this model */ + default_settings: components["schemas"]["LoraModelDefaultSettings"] | null; + /** + * Format + * @default omi + * @constant + */ + format: "omi"; + /** + * Base + * @default flux + * @constant + */ + base: "flux"; + }; + /** LoRA_OMI_SDXL_Config */ + LoRA_OMI_SDXL_Config: { + /** + * Key + * @description A unique key for this model. + */ + key: string; + /** + * Hash + * @description The hash of the model file(s). + */ + hash: string; + /** + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. + */ + path: string; + /** + * File Size + * @description The size of the model in bytes. + */ + file_size: number; + /** + * Name + * @description Name of the model. + */ + name: string; + /** + * Description + * @description Model description + */ + description: string | null; + /** + * Source + * @description The original source of the model (path, URL or repo_id). + */ + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; + /** + * Source Api Response + * @description The original API response from the source, as stringified JSON. + */ + source_api_response: string | null; + /** + * Cover Image + * @description Url for image to preview model + */ + cover_image: string | null; + /** + * Usage Info + * @description Usage information for this model + */ + usage_info: string | null; + /** + * Type + * @default lora + * @constant + */ + type: "lora"; + /** + * Trigger Phrases + * @description Set of trigger phrases for this model + */ + trigger_phrases: string[] | null; + /** @description Default settings for this model */ + default_settings: components["schemas"]["LoraModelDefaultSettings"] | null; + /** + * Format + * @default omi + * @constant + */ + format: "omi"; + /** + * Base + * @default sdxl + * @constant + */ + base: "sdxl"; + }; + /** + * LocalModelSource + * @description A local file or directory path. 
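// Example (not part of this diff): the LoRA_*_Config schemas above differ
// only in their constant `format` and `base` fields, so a union of them can
// be narrowed with plain comparisons. The union alias here is illustrative;
// the API's own tagged union may differ.
import type { components } from 'services/api/schema';

type SomeLoRAConfig =
  | components['schemas']['LoRA_Diffusers_SDXL_Config']
  | components['schemas']['LoRA_LyCORIS_SDXL_Config']
  | components['schemas']['LoRA_OMI_SDXL_Config'];

function describeLora(config: SomeLoRAConfig): string {
  // `format` is a distinct literal type on each member, so this narrows.
  switch (config.format) {
    case 'omi':
      return `OMI LoRA (${config.base})`;
    case 'lycoris':
      return `LyCORIS LoRA (${config.base})`;
    default:
      return `Diffusers LoRA (${config.base})`;
  }
}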
+ */ + LocalModelSource: { + /** Path */ + path: string; + /** + * Inplace + * @default false + */ + inplace?: boolean | null; + /** + * @description discriminator enum property added by openapi-typescript + * @enum {string} + */ + type: "local"; + }; + /** + * LogLevel + * @enum {integer} */ - InvocationProgressEvent: { + LogLevel: 0 | 10 | 20 | 30 | 40 | 50; + /** LoraModelDefaultSettings */ + LoraModelDefaultSettings: { /** - * Timestamp - * @description The timestamp of the event + * Weight + * @description Default weight for this model */ - timestamp: number; + weight?: number | null; + }; + /** MDControlListOutput */ + MDControlListOutput: { /** - * Queue Id - * @description The ID of the queue + * ControlNet-List + * @description ControlNet(s) to apply */ - queue_id: string; + control_list: components["schemas"]["ControlField"] | components["schemas"]["ControlField"][] | null; /** - * Item Id - * @description The ID of the queue item + * type + * @default md_control_list_output + * @constant */ - item_id: number; + type: "md_control_list_output"; + }; + /** MDIPAdapterListOutput */ + MDIPAdapterListOutput: { /** - * Batch Id - * @description The ID of the queue batch + * IP-Adapter-List + * @description IP-Adapter to apply */ - batch_id: string; + ip_adapter_list: components["schemas"]["IPAdapterField"] | components["schemas"]["IPAdapterField"][] | null; /** - * Origin - * @description The origin of the queue item - * @default null + * type + * @default md_ip_adapter_list_output + * @constant */ - origin: string | null; + type: "md_ip_adapter_list_output"; + }; + /** MDT2IAdapterListOutput */ + MDT2IAdapterListOutput: { /** - * Destination - * @description The destination of the queue item + * T2I Adapter-List + * @description T2I-Adapter(s) to apply + */ + t2i_adapter_list: components["schemas"]["T2IAdapterField"] | components["schemas"]["T2IAdapterField"][] | null; + /** + * type + * @default md_ip_adapters_output + * @constant + */ + type: "md_ip_adapters_output"; + }; + /** + * MLSD Detection + * @description Generates a line segment map using MLSD. + */ + MLSDDetectionInvocation: { + /** + * @description The board to save the image to * @default null */ - destination: string | null; + board?: components["schemas"]["BoardField"] | null; /** - * Session Id - * @description The ID of the session (aka graph execution state) + * @description Optional metadata to be saved with the image + * @default null */ - session_id: string; + metadata?: components["schemas"]["MetadataField"] | null; /** - * Invocation - * @description The ID of the invocation + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
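// Example (not part of this diff): LocalModelSource carries the explicit
// `type: "local"` discriminator that openapi-typescript adds, so model-source
// unions can be switched on `type`. The path below is a placeholder.
import type { components } from 'services/api/schema';

const source: components['schemas']['LocalModelSource'] = {
  type: 'local',
  path: '/models/example.safetensors',
  inplace: true, // register in place instead of copying into the models dir
};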
*/ - invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | 
components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | 
components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | 
components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"]; + id: string; /** - * Invocation Source Id - * @description The ID of the prepared invocation's source node + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - invocation_source_id: string; + is_intermediate?: boolean; /** - * Message - * @description A message to display + * Use Cache + * @description Whether or not to use the cache + * @default true */ - message: string; + use_cache?: boolean; /** - * Percentage - * @description The percentage of the progress (omit to indicate indeterminate progress) + * @description The image to process * @default null */ - percentage: number | null; + image?: components["schemas"]["ImageField"] | null; /** - * @description An image representing the current state of the progress - * @default null + * Score Threshold + * @description The threshold used to score points when determining line segments + * @default 0.1 */ - image: components["schemas"]["ProgressImage"] | null; - }; - /** - * InvocationStartedEvent - * @description Event model for invocation_started - */ - InvocationStartedEvent: { + score_threshold?: number; /** - * Timestamp - * @description The timestamp of the event + * Distance Threshold + * @description Threshold for including a line segment - lines shorter than this distance will be discarded + * @default 20 */ - timestamp: number; + distance_threshold?: number; /** - * Queue Id - * @description The ID of the queue + * type + * @default mlsd_detection + * @constant */ - queue_id: string; + type: "mlsd_detection"; + }; + /** MainModelDefaultSettings */ + MainModelDefaultSettings: { /** - * Item Id - * @description The ID of the queue item + * Vae + * @description Default VAE for this model (model key) */ - item_id: number; + vae?: string | null; /** - * Batch Id - * @description The ID of the queue batch + * Vae Precision + * @description Default VAE precision for this model */ - batch_id: string; + vae_precision?: ("fp16" | "fp32") | null; /** - * Origin - * @description The origin of the queue item - * @default null + * Scheduler + * @description Default scheduler for this model */ - origin: string | null; + scheduler?: ("ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_3m" | "dpmpp_3m_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd") | null; /** - * Destination - * @description The destination of the queue item - * @default null + * Steps + * @description Default number of steps for this model */ - destination: string | null; + steps?: number | null; /** - * Session Id - * @description The ID of the session (aka graph execution state) + * Cfg Scale + * @description Default CFG Scale for this model */ - session_id: string; + cfg_scale?: number | null; /** - * Invocation - * @description The ID of the invocation + * Cfg Rescale Multiplier + 
+       * @description Default CFG Rescale Multiplier for this model
        */
-      invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] | components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] |
components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | 
components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | 
components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"]; + cfg_rescale_multiplier?: number | null; /** - * Invocation Source Id - * @description The ID of the prepared invocation's source node + * Width + * @description Default width for this model */ - invocation_source_id: string; - }; - /** - * InvokeAIAppConfig - * @description Invoke's global app configuration. - * - * Typically, you won't need to interact with this class directly. Instead, use the `get_config` function from `invokeai.app.services.config` to get a singleton config object. - * - * Attributes: - * host: IP address to bind to. Use `0.0.0.0` to serve to your local network. - * port: Port to bind to. - * allow_origins: Allowed CORS origins. - * allow_credentials: Allow CORS credentials. - * allow_methods: Methods allowed for CORS. - * allow_headers: Headers allowed for CORS. - * ssl_certfile: SSL certificate file for HTTPS. See https://www.uvicorn.org/settings/#https. - * ssl_keyfile: SSL key file for HTTPS. See https://www.uvicorn.org/settings/#https. - * log_tokenization: Enable logging of parsed prompt tokens. - * patchmatch: Enable patchmatch inpaint code. - * models_dir: Path to the models directory. - * convert_cache_dir: Path to the converted models cache directory (DEPRECATED, but do not delete because it is needed for migration from previous versions). - * download_cache_dir: Path to the directory that contains dynamically downloaded models. - * legacy_conf_dir: Path to directory of legacy checkpoint config files. - * db_dir: Path to InvokeAI databases directory. - * outputs_dir: Path to directory for outputs. - * custom_nodes_dir: Path to directory for custom nodes. - * style_presets_dir: Path to directory for style presets. - * workflow_thumbnails_dir: Path to directory for workflow thumbnails. - * log_handlers: Log handler. Valid options are "console", "file=", "syslog=path|address:host:port", "http=". - * log_format: Log format. Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style.
-     * Valid values: `plain`, `color`, `syslog`, `legacy`
-     * log_level: Emit logging messages at this level or higher.
-     * Valid values: `debug`, `info`, `warning`, `error`, `critical`
-     * log_sql: Log SQL queries. `log_level` must be `debug` for this to do anything. Extremely verbose.
-     * log_level_network: Log level for network-related messages. 'info' and 'debug' are very verbose.
-     * Valid values: `debug`, `info`, `warning`, `error`, `critical`
-     * use_memory_db: Use in-memory database. Useful for development.
-     * dev_reload: Automatically reload when Python sources are changed. Does not reload node definitions.
-     * profile_graphs: Enable graph profiling using `cProfile`.
-     * profile_prefix: An optional prefix for profile output files.
-     * profiles_dir: Path to profiles output directory.
-     * max_cache_ram_gb: The maximum amount of CPU RAM to use for model caching in GB. If unset, the limit will be configured based on the available RAM. In most cases, it is recommended to leave this unset.
-     * max_cache_vram_gb: The amount of VRAM to use for model caching in GB. If unset, the limit will be configured based on the available VRAM and the device_working_mem_gb. In most cases, it is recommended to leave this unset.
-     * log_memory_usage: If True, a memory snapshot will be captured before and after every model cache operation, and the result will be logged (at debug level). There is a time cost to capturing the memory snapshots, so it is recommended to only enable this feature if you are actively inspecting the model cache's behaviour.
-     * device_working_mem_gb: The amount of working memory to keep available on the compute device (in GB). Has no effect if running on CPU. If you are experiencing OOM errors, try increasing this value.
-     * enable_partial_loading: Enable partial loading of models. This enables models to run with reduced VRAM requirements (at the cost of slower speed) by streaming the model from RAM to VRAM as its used. In some edge cases, partial loading can cause models to run more slowly if they were previously being fully loaded into VRAM.
-     * keep_ram_copy_of_weights: Whether to keep a full RAM copy of a model's weights when the model is loaded in VRAM. Keeping a RAM copy increases average RAM usage, but speeds up model switching and LoRA patching (assuming there is sufficient RAM). Set this to False if RAM pressure is consistently high.
-     * ram: DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_ram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable.
-     * vram: DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_vram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable.
-     * lazy_offload: DEPRECATED: This setting is no longer used. Lazy-offloading is enabled by default. This config setting will be removed once the new model cache behavior is stable.
-     * pytorch_cuda_alloc_conf: Configure the Torch CUDA memory allocator. This will impact peak reserved VRAM usage and performance. Setting to "backend:cudaMallocAsync" works well on many systems. The optimal configuration is highly dependent on the system configuration (device type, VRAM, CUDA driver version, etc.), so must be tuned experimentally.
-     * device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.
-     * Valid values: `auto`, `cpu`, `cuda`, `mps`, `cuda:N` (where N is a device number)
-     * precision: Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.
-     * Valid values: `auto`, `float16`, `bfloat16`, `float32`
-     * sequential_guidance: Whether to calculate guidance in serial instead of in parallel, lowering memory requirements.
-     * attention_type: Attention type.
-     * Valid values: `auto`, `normal`, `xformers`, `sliced`, `torch-sdp`
-     * attention_slice_size: Slice size, valid when attention_type=="sliced".
-     * Valid values: `auto`, `balanced`, `max`, `1`, `2`, `3`, `4`, `5`, `6`, `7`, `8`
-     * force_tiled_decode: Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty).
-     * pil_compress_level: The compress_level setting of PIL.Image.save(), used for PNG encoding. All settings are lossless. 0 = no compression, 1 = fastest with slightly larger filesize, 9 = slowest with smallest filesize. 1 is typically the best setting.
-     * max_queue_size: Maximum number of items in the session queue.
-     * clear_queue_on_startup: Empties session queue on startup.
-     * allow_nodes: List of nodes to allow. Omit to allow all.
-     * deny_nodes: List of nodes to deny. Omit to deny none.
-     * node_cache_size: How many cached nodes to keep in memory.
-     * hashing_algorithm: Model hashing algorthim for model installs. 'blake3_multi' is best for SSDs. 'blake3_single' is best for spinning disk HDDs. 'random' disables hashing, instead assigning a UUID to models. Useful when using a memory db to reduce model installation time, or if you don't care about storing stable hashes for models. Alternatively, any other hashlib algorithm is accepted, though these are not nearly as performant as blake3.
-     * Valid values: `blake3_multi`, `blake3_single`, `random`, `md5`, `sha1`, `sha224`, `sha256`, `sha384`, `sha512`, `blake2b`, `blake2s`, `sha3_224`, `sha3_256`, `sha3_384`, `sha3_512`, `shake_128`, `shake_256`
-     * remote_api_tokens: List of regular expression and token pairs used when downloading models from URLs. The download URL is tested against the regex, and if it matches, the token is provided in as a Bearer token.
-     * scan_models_on_startup: Scan the models directory on startup, registering orphaned models. This is typically only used in conjunction with `use_memory_db` for testing purposes.
-     * unsafe_disable_picklescan: UNSAFE. Disable the picklescan security check during model installation. Recommended only for development and testing purposes. This will allow arbitrary code execution during model installation, so should never be used in production.
-     * allow_unknown_models: Allow installation of models that we are unable to identify. If enabled, models will be marked as `unknown` in the database, and will not have any metadata associated with them. If disabled, unknown models will be rejected during installation.
+      width?: number | null;
+      /**
+       * Height
+       * @description Default height for this model
+       */
+      height?: number | null;
+      /**
+       * Guidance
+       * @description Default Guidance for this model
+       */
+      guidance?: number | null;
+    };
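A note on the MainModelDefaultSettings schema added above: every field is optional and nullable, so consumers should treat null and undefined alike when merging a model's stored defaults into request parameters. A minimal TypeScript sketch of that merge, assuming the generated components types from this schema file; GenerationParams and applyModelDefaults are hypothetical names for illustration, not part of this PR:

import type { components } from "./schema"; // assumed path to this generated file

type MainModelDefaultSettings = components["schemas"]["MainModelDefaultSettings"];

// Illustrative subset of generation request parameters.
interface GenerationParams {
  steps: number;
  cfg_scale: number;
  width: number;
  height: number;
}

// Apply any default the model declares; `??` skips both null (explicitly
// unset in the database) and undefined (field omitted entirely).
function applyModelDefaults(
  params: GenerationParams,
  defaults: MainModelDefaultSettings | null,
): GenerationParams {
  if (!defaults) return params;
  return {
    steps: defaults.steps ?? params.steps,
    cfg_scale: defaults.cfg_scale ?? params.cfg_scale,
    width: defaults.width ?? params.width,
    height: defaults.height ?? params.height,
  };
}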
+    /**
+     * Main Model - SD1.5, SD2
+     * @description Loads a main model, outputting its submodels.
      */
-    InvokeAIAppConfig: {
+    MainModelLoaderInvocation: {
       /**
-       * Schema Version
-       * @description Schema version of the config file. This is not a user-configurable setting.
-       * @default 4.0.2
+       * Id
+       * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
        */
-      schema_version?: string;
+      id: string;
       /**
-       * Legacy Models Yaml Path
-       * @description Path to the legacy models.yaml file. This is not a user-configurable setting.
+       * Is Intermediate
+       * @description Whether or not this is an intermediate invocation.
+       * @default false
        */
-      legacy_models_yaml_path?: string | null;
+      is_intermediate?: boolean;
       /**
-       * Host
-       * @description IP address to bind to. Use `0.0.0.0` to serve to your local network.
-       * @default 127.0.0.1
+       * Use Cache
+       * @description Whether or not to use the cache
+       * @default true
        */
-      host?: string;
+      use_cache?: boolean;
       /**
-       * Port
-       * @description Port to bind to.
-       * @default 9090
+       * @description Main model (UNet, VAE, CLIP) to load
+       * @default null
        */
-      port?: number;
+      model?: components["schemas"]["ModelIdentifierField"] | null;
       /**
-       * Allow Origins
-       * @description Allowed CORS origins.
-       * @default []
+       * type
+       * @default main_model_loader
+       * @constant
        */
-      allow_origins?: string[];
+      type: "main_model_loader";
+    };
+    /**
+     * Main_BnBNF4_FLUX_Config
+     * @description Model config for main checkpoint models.
+     */
+    Main_BnBNF4_FLUX_Config: {
       /**
-       * Allow Credentials
-       * @description Allow CORS credentials.
-       * @default true
+       * Key
+       * @description A unique key for this model.
        */
-      allow_credentials?: boolean;
+      key: string;
       /**
-       * Allow Methods
-       * @description Methods allowed for CORS.
-       * @default [
-       *   "*"
-       * ]
+       * Hash
+       * @description The hash of the model file(s).
        */
-      allow_methods?: string[];
+      hash: string;
       /**
-       * Allow Headers
-       * @description Headers allowed for CORS.
-       * @default [
-       *   "*"
-       * ]
+       * Path
+       * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
        */
-      allow_headers?: string[];
+      path: string;
       /**
-       * Ssl Certfile
-       * @description SSL certificate file for HTTPS. See https://www.uvicorn.org/settings/#https.
+       * File Size
+       * @description The size of the model in bytes.
        */
-      ssl_certfile?: string | null;
+      file_size: number;
       /**
-       * Ssl Keyfile
-       * @description SSL key file for HTTPS. See https://www.uvicorn.org/settings/#https.
+       * Name
+       * @description Name of the model.
        */
-      ssl_keyfile?: string | null;
+      name: string;
       /**
-       * Log Tokenization
-       * @description Enable logging of parsed prompt tokens.
-       * @default false
+       * Description
+       * @description Model description
        */
-      log_tokenization?: boolean;
+      description: string | null;
       /**
-       * Patchmatch
-       * @description Enable patchmatch inpaint code.
-       * @default true
+       * Source
+       * @description The original source of the model (path, URL or repo_id).
        */
-      patchmatch?: boolean;
+      source: string;
+      /** @description The type of source */
+      source_type: components["schemas"]["ModelSourceType"];
       /**
-       * Models Dir
-       * Format: path
-       * @description Path to the models directory.
-       * @default models
+       * Source Api Response
+       * @description The original API response from the source, as stringified JSON.
        */
-      models_dir?: string;
+      source_api_response: string | null;
       /**
-       * Convert Cache Dir
-       * Format: path
-       * @description Path to the converted models cache directory (DEPRECATED, but do not delete because it is needed for migration from previous versions).
-       * @default models/.convert_cache
+       * Cover Image
+       * @description Url for image to preview model
        */
-      convert_cache_dir?: string;
+      cover_image: string | null;
       /**
-       * Download Cache Dir
-       * Format: path
-       * @description Path to the directory that contains dynamically downloaded models.
-       * @default models/.download_cache
+       * Usage Info
+       * @description Usage information for this model
        */
-      download_cache_dir?: string;
+      usage_info: string | null;
       /**
-       * Legacy Conf Dir
-       * Format: path
-       * @description Path to directory of legacy checkpoint config files.
-       * @default configs
+       * Type
+       * @default main
+       * @constant
        */
-      legacy_conf_dir?: string;
+      type: "main";
       /**
-       * Db Dir
-       * Format: path
-       * @description Path to InvokeAI databases directory.
-       * @default databases
+       * Trigger Phrases
+       * @description Set of trigger phrases for this model
        */
-      db_dir?: string;
+      trigger_phrases: string[] | null;
+      /** @description Default settings for this model */
+      default_settings: components["schemas"]["MainModelDefaultSettings"] | null;
       /**
-       * Outputs Dir
-       * Format: path
-       * @description Path to directory for outputs.
-       * @default outputs
+       * Config Path
+       * @description Path to the config for this model, if any.
        */
-      outputs_dir?: string;
+      config_path: string | null;
       /**
-       * Custom Nodes Dir
-       * Format: path
-       * @description Path to directory for custom nodes.
-       * @default nodes
+       * Base
+       * @default flux
+       * @constant
        */
-      custom_nodes_dir?: string;
+      base: "flux";
       /**
-       * Style Presets Dir
-       * Format: path
-       * @description Path to directory for style presets.
-       * @default style_presets
+       * Format
+       * @default bnb_quantized_nf4b
+       * @constant
        */
-      style_presets_dir?: string;
+      format: "bnb_quantized_nf4b";
+      variant: components["schemas"]["FluxVariantType"];
+    };
+    /**
+     * Main_Checkpoint_FLUX_Config
+     * @description Model config for main checkpoint models.
+     */
+    Main_Checkpoint_FLUX_Config: {
       /**
-       * Workflow Thumbnails Dir
-       * Format: path
-       * @description Path to directory for workflow thumbnails.
-       * @default workflow_thumbnails
+       * Key
+       * @description A unique key for this model.
        */
-      workflow_thumbnails_dir?: string;
+      key: string;
       /**
-       * Log Handlers
-       * @description Log handler. Valid options are "console", "file=", "syslog=path|address:host:port", "http=".
-       * @default [
-       *   "console"
-       * ]
+       * Hash
+       * @description The hash of the model file(s).
        */
-      log_handlers?: string[];
+      hash: string;
       /**
-       * Log Format
-       * @description Log format. Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style.
-       * @default color
-       * @enum {string}
+       * Path
+       * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
        */
-      log_format?: "plain" | "color" | "syslog" | "legacy";
+      path: string;
       /**
-       * Log Level
-       * @description Emit logging messages at this level or higher.
-       * @default info
-       * @enum {string}
+       * File Size
+       * @description The size of the model in bytes.
        */
-      log_level?: "debug" | "info" | "warning" | "error" | "critical";
+      file_size: number;
       /**
-       * Log Sql
-       * @description Log SQL queries. `log_level` must be `debug` for this to do anything. Extremely verbose.
-       * @default false
+       * Name
+       * @description Name of the model.
        */
-      log_sql?: boolean;
+      name: string;
       /**
-       * Log Level Network
-       * @description Log level for network-related messages. 'info' and 'debug' are very verbose.
-       * @default warning
-       * @enum {string}
+       * Description
+       * @description Model description
        */
-      log_level_network?: "debug" | "info" | "warning" | "error" | "critical";
+      description: string | null;
       /**
-       * Use Memory Db
-       * @description Use in-memory database. Useful for development.
-       * @default false
+       * Source
+       * @description The original source of the model (path, URL or repo_id).
        */
-      use_memory_db?: boolean;
+      source: string;
+      /** @description The type of source */
+      source_type: components["schemas"]["ModelSourceType"];
       /**
-       * Dev Reload
-       * @description Automatically reload when Python sources are changed. Does not reload node definitions.
-       * @default false
+       * Source Api Response
+       * @description The original API response from the source, as stringified JSON.
        */
-      dev_reload?: boolean;
+      source_api_response: string | null;
       /**
-       * Profile Graphs
-       * @description Enable graph profiling using `cProfile`.
-       * @default false
+       * Cover Image
+       * @description Url for image to preview model
        */
-      profile_graphs?: boolean;
+      cover_image: string | null;
       /**
-       * Profile Prefix
-       * @description An optional prefix for profile output files.
+       * Usage Info
+       * @description Usage information for this model
        */
-      profile_prefix?: string | null;
+      usage_info: string | null;
       /**
-       * Profiles Dir
-       * Format: path
-       * @description Path to profiles output directory.
-       * @default profiles
+       * Type
+       * @default main
+       * @constant
+       */
+      type: "main";
+      /**
+       * Trigger Phrases
+       * @description Set of trigger phrases for this model
        */
-      profiles_dir?: string;
+      trigger_phrases: string[] | null;
+      /** @description Default settings for this model */
+      default_settings: components["schemas"]["MainModelDefaultSettings"] | null;
       /**
-       * Max Cache Ram Gb
-       * @description The maximum amount of CPU RAM to use for model caching in GB. If unset, the limit will be configured based on the available RAM. In most cases, it is recommended to leave this unset.
+       * Config Path
+       * @description Path to the config for this model, if any.
        */
-      max_cache_ram_gb?: number | null;
+      config_path: string | null;
       /**
-       * Max Cache Vram Gb
-       * @description The amount of VRAM to use for model caching in GB. If unset, the limit will be configured based on the available VRAM and the device_working_mem_gb. In most cases, it is recommended to leave this unset.
+       * Format
+       * @default checkpoint
+       * @constant
        */
-      max_cache_vram_gb?: number | null;
+      format: "checkpoint";
       /**
-       * Log Memory Usage
-       * @description If True, a memory snapshot will be captured before and after every model cache operation, and the result will be logged (at debug level). There is a time cost to capturing the memory snapshots, so it is recommended to only enable this feature if you are actively inspecting the model cache's behaviour.
-       * @default false
+       * Base
+       * @default flux
+       * @constant
        */
-      log_memory_usage?: boolean;
+      base: "flux";
+      variant: components["schemas"]["FluxVariantType"];
+    };
+    /** Main_Checkpoint_SD1_Config */
+    Main_Checkpoint_SD1_Config: {
       /**
-       * Device Working Mem Gb
-       * @description The amount of working memory to keep available on the compute device (in GB). Has no effect if running on CPU. If you are experiencing OOM errors, try increasing this value.
-       * @default 3
+       * Key
+       * @description A unique key for this model.
        */
-      device_working_mem_gb?: number;
+      key: string;
       /**
-       * Enable Partial Loading
-       * @description Enable partial loading of models. This enables models to run with reduced VRAM requirements (at the cost of slower speed) by streaming the model from RAM to VRAM as its used. In some edge cases, partial loading can cause models to run more slowly if they were previously being fully loaded into VRAM.
-       * @default false
+       * Hash
+       * @description The hash of the model file(s).
        */
-      enable_partial_loading?: boolean;
+      hash: string;
       /**
-       * Keep Ram Copy Of Weights
-       * @description Whether to keep a full RAM copy of a model's weights when the model is loaded in VRAM. Keeping a RAM copy increases average RAM usage, but speeds up model switching and LoRA patching (assuming there is sufficient RAM). Set this to False if RAM pressure is consistently high.
-       * @default true
+       * Path
+       * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
        */
-      keep_ram_copy_of_weights?: boolean;
+      path: string;
       /**
-       * Ram
-       * @description DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_ram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable.
+       * File Size
+       * @description The size of the model in bytes.
        */
-      ram?: number | null;
+      file_size: number;
       /**
-       * Vram
-       * @description DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_vram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable.
+       * Name
+       * @description Name of the model.
        */
-      vram?: number | null;
+      name: string;
       /**
-       * Lazy Offload
-       * @description DEPRECATED: This setting is no longer used. Lazy-offloading is enabled by default. This config setting will be removed once the new model cache behavior is stable.
-       * @default true
+       * Description
+       * @description Model description
        */
-      lazy_offload?: boolean;
+      description: string | null;
       /**
-       * Pytorch Cuda Alloc Conf
-       * @description Configure the Torch CUDA memory allocator. This will impact peak reserved VRAM usage and performance. Setting to "backend:cudaMallocAsync" works well on many systems. The optimal configuration is highly dependent on the system configuration (device type, VRAM, CUDA driver version, etc.), so must be tuned experimentally.
+       * Source
+       * @description The original source of the model (path, URL or repo_id).
        */
-      pytorch_cuda_alloc_conf?: string | null;
+      source: string;
+      /** @description The type of source */
+      source_type: components["schemas"]["ModelSourceType"];
       /**
-       * Device
-       * @description Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.
-       * Valid values: `auto`, `cpu`, `cuda`, `mps`, `cuda:N` (where N is a device number)
-       * @default auto
+       * Source Api Response
+       * @description The original API response from the source, as stringified JSON.
        */
-      device?: string;
+      source_api_response: string | null;
       /**
-       * Precision
-       * @description Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.
-       * @default auto
-       * @enum {string}
+       * Cover Image
+       * @description Url for image to preview model
        */
-      precision?: "auto" | "float16" | "bfloat16" | "float32";
+      cover_image: string | null;
       /**
-       * Sequential Guidance
-       * @description Whether to calculate guidance in serial instead of in parallel, lowering memory requirements.
-       * @default false
+       * Usage Info
+       * @description Usage information for this model
        */
-      sequential_guidance?: boolean;
+      usage_info: string | null;
       /**
-       * Attention Type
-       * @description Attention type.
-       * @default auto
-       * @enum {string}
+       * Type
+       * @default main
+       * @constant
        */
-      attention_type?: "auto" | "normal" | "xformers" | "sliced" | "torch-sdp";
+      type: "main";
       /**
-       * Attention Slice Size
-       * @description Slice size, valid when attention_type=="sliced".
-       * @default auto
-       * @enum {unknown}
+       * Trigger Phrases
+       * @description Set of trigger phrases for this model
        */
-      attention_slice_size?: "auto" | "balanced" | "max" | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8;
+      trigger_phrases: string[] | null;
+      /** @description Default settings for this model */
+      default_settings: components["schemas"]["MainModelDefaultSettings"] | null;
       /**
-       * Force Tiled Decode
-       * @description Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty).
-       * @default false
+       * Config Path
+       * @description Path to the config for this model, if any.
        */
-      force_tiled_decode?: boolean;
+      config_path: string | null;
       /**
-       * Pil Compress Level
-       * @description The compress_level setting of PIL.Image.save(), used for PNG encoding. All settings are lossless. 0 = no compression, 1 = fastest with slightly larger filesize, 9 = slowest with smallest filesize. 1 is typically the best setting.
-       * @default 1
+       * Format
+       * @default checkpoint
+       * @constant
        */
-      pil_compress_level?: number;
+      format: "checkpoint";
+      prediction_type: components["schemas"]["SchedulerPredictionType"];
+      variant: components["schemas"]["ModelVariantType"];
       /**
-       * Max Queue Size
-       * @description Maximum number of items in the session queue.
-       * @default 10000
+       * Base
+       * @default sd-1
+       * @constant
        */
-      max_queue_size?: number;
+      base: "sd-1";
+    };
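Main_Checkpoint_SD1_Config above and its SD2, SDXL and SDXL-Refiner siblings below each fix base and format as @constant literals, so a union of these configs behaves as a discriminated union in TypeScript. A short sketch under that assumption; the MainCheckpointConfig alias and isSDXLFamily helper are illustrative names, not part of this PR:

import type { components } from "./schema"; // assumed path to this generated file

type MainCheckpointConfig =
  | components["schemas"]["Main_Checkpoint_SD1_Config"]
  | components["schemas"]["Main_Checkpoint_SD2_Config"]
  | components["schemas"]["Main_Checkpoint_SDXL_Config"]
  | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"];

// `base` is a distinct literal on every member, so a plain comparison
// narrows the union without casts or runtime schema checks.
function isSDXLFamily(config: MainCheckpointConfig): boolean {
  return config.base === "sdxl" || config.base === "sdxl-refiner";
}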
+    /** Main_Checkpoint_SD2_Config */
+    Main_Checkpoint_SD2_Config: {
       /**
-       * Clear Queue On Startup
-       * @description Empties session queue on startup.
-       * @default false
+       * Key
+       * @description A unique key for this model.
        */
-      clear_queue_on_startup?: boolean;
+      key: string;
       /**
-       * Allow Nodes
-       * @description List of nodes to allow. Omit to allow all.
+       * Hash
+       * @description The hash of the model file(s).
        */
-      allow_nodes?: string[] | null;
+      hash: string;
       /**
-       * Deny Nodes
-       * @description List of nodes to deny. Omit to deny none.
+       * Path
+       * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
        */
-      deny_nodes?: string[] | null;
+      path: string;
       /**
-       * Node Cache Size
-       * @description How many cached nodes to keep in memory.
-       * @default 512
+       * File Size
+       * @description The size of the model in bytes.
        */
-      node_cache_size?: number;
+      file_size: number;
       /**
-       * Hashing Algorithm
-       * @description Model hashing algorthim for model installs. 'blake3_multi' is best for SSDs. 'blake3_single' is best for spinning disk HDDs. 'random' disables hashing, instead assigning a UUID to models. Useful when using a memory db to reduce model installation time, or if you don't care about storing stable hashes for models. Alternatively, any other hashlib algorithm is accepted, though these are not nearly as performant as blake3.
-       * @default blake3_single
-       * @enum {string}
+       * Name
+       * @description Name of the model.
        */
-      hashing_algorithm?: "blake3_multi" | "blake3_single" | "random" | "md5" | "sha1" | "sha224" | "sha256" | "sha384" | "sha512" | "blake2b" | "blake2s" | "sha3_224" | "sha3_256" | "sha3_384" | "sha3_512" | "shake_128" | "shake_256";
+      name: string;
       /**
-       * Remote Api Tokens
-       * @description List of regular expression and token pairs used when downloading models from URLs. The download URL is tested against the regex, and if it matches, the token is provided in as a Bearer token.
+       * Description
+       * @description Model description
        */
-      remote_api_tokens?: components["schemas"]["URLRegexTokenPair"][] | null;
+      description: string | null;
       /**
-       * Scan Models On Startup
-       * @description Scan the models directory on startup, registering orphaned models. This is typically only used in conjunction with `use_memory_db` for testing purposes.
-       * @default false
+       * Source
+       * @description The original source of the model (path, URL or repo_id).
        */
-      scan_models_on_startup?: boolean;
+      source: string;
+      /** @description The type of source */
+      source_type: components["schemas"]["ModelSourceType"];
       /**
-       * Unsafe Disable Picklescan
-       * @description UNSAFE. Disable the picklescan security check during model installation. Recommended only for development and testing purposes. This will allow arbitrary code execution during model installation, so should never be used in production.
-       * @default false
+       * Source Api Response
+       * @description The original API response from the source, as stringified JSON.
        */
-      unsafe_disable_picklescan?: boolean;
+      source_api_response: string | null;
       /**
-       * Allow Unknown Models
-       * @description Allow installation of models that we are unable to identify. If enabled, models will be marked as `unknown` in the database, and will not have any metadata associated with them. If disabled, unknown models will be rejected during installation.
-       * @default true
+       * Cover Image
+       * @description Url for image to preview model
        */
-      allow_unknown_models?: boolean;
-    };
-    /**
-     * InvokeAIAppConfigWithSetFields
-     * @description InvokeAI App Config with model fields set
-     */
-    InvokeAIAppConfigWithSetFields: {
+      cover_image: string | null;
       /**
-       * Set Fields
-       * @description The set fields
+       * Usage Info
+       * @description Usage information for this model
        */
-      set_fields: string[];
-      /** @description The InvokeAI App Config */
-      config: components["schemas"]["InvokeAIAppConfig"];
-    };
-    /**
-     * Adjust Image Hue Plus
-     * @description Adjusts the Hue of an image by rotating it in the selected color space.
-     * Originally created by @dwringer
-     */
-    InvokeAdjustImageHuePlusInvocation: {
+      usage_info: string | null;
       /**
-       * @description The board to save the image to
-       * @default null
+       * Type
+       * @default main
+       * @constant
        */
-      board?: components["schemas"]["BoardField"] | null;
+      type: "main";
       /**
-       * @description Optional metadata to be saved with the image
-       * @default null
+       * Trigger Phrases
+       * @description Set of trigger phrases for this model
        */
-      metadata?: components["schemas"]["MetadataField"] | null;
+      trigger_phrases: string[] | null;
+      /** @description Default settings for this model */
+      default_settings: components["schemas"]["MainModelDefaultSettings"] | null;
       /**
-       * Id
-       * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+       * Config Path
+       * @description Path to the config for this model, if any.
        */
-      id: string;
+      config_path: string | null;
       /**
-       * Is Intermediate
-       * @description Whether or not this is an intermediate invocation.
-       * @default false
+       * Format
+       * @default checkpoint
+       * @constant
        */
-      is_intermediate?: boolean;
+      format: "checkpoint";
+      prediction_type: components["schemas"]["SchedulerPredictionType"];
+      variant: components["schemas"]["ModelVariantType"];
       /**
-       * Use Cache
-       * @description Whether or not to use the cache
-       * @default true
+       * Base
+       * @default sd-2
+       * @constant
        */
-      use_cache?: boolean;
+      base: "sd-2";
+    };
+    /** Main_Checkpoint_SDXLRefiner_Config */
+    Main_Checkpoint_SDXLRefiner_Config: {
       /**
-       * @description The image to adjust
-       * @default null
+       * Key
+       * @description A unique key for this model.
        */
-      image?: components["schemas"]["ImageField"] | null;
+      key: string;
       /**
-       * Space
-       * @description Color space in which to rotate hue by polar coords (*: non-invertible)
-       * @default HSV / HSL / RGB
-       * @enum {string}
+       * Hash
+       * @description The hash of the model file(s).
        */
-      space?: "HSV / HSL / RGB" | "Okhsl" | "Okhsv" | "*Oklch / Oklab" | "*LCh / CIELab" | "*UPLab (w/CIELab_to_UPLab.icc)";
+      hash: string;
       /**
-       * Degrees
-       * @description Degrees by which to rotate image hue
-       * @default 0
+       * Path
+       * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
        */
-      degrees?: number;
+      path: string;
       /**
-       * Preserve Lightness
-       * @description Whether to preserve CIELAB lightness values
-       * @default false
+       * File Size
+       * @description The size of the model in bytes.
        */
-      preserve_lightness?: boolean;
+      file_size: number;
       /**
-       * Ok Adaptive Gamut
-       * @description Higher preserves chroma at the expense of lightness (Oklab)
-       * @default 0.05
+       * Name
+       * @description Name of the model.
        */
-      ok_adaptive_gamut?: number;
+      name: string;
       /**
-       * Ok High Precision
-       * @description Use more steps in computing gamut (Oklab/Okhsv/Okhsl)
-       * @default true
+       * Description
+       * @description Model description
        */
-      ok_high_precision?: boolean;
+      description: string | null;
       /**
-       * type
-       * @default invokeai_img_hue_adjust_plus
-       * @constant
+       * Source
+       * @description The original source of the model (path, URL or repo_id).
        */
-      type: "invokeai_img_hue_adjust_plus";
-    };
-    /**
-     * Equivalent Achromatic Lightness
-     * @description Calculate Equivalent Achromatic Lightness from image.
-     * Originally created by @dwringer
-     */
-    InvokeEquivalentAchromaticLightnessInvocation: {
+      source: string;
+      /** @description The type of source */
+      source_type: components["schemas"]["ModelSourceType"];
       /**
-       * @description The board to save the image to
-       * @default null
+       * Source Api Response
+       * @description The original API response from the source, as stringified JSON.
        */
-      board?: components["schemas"]["BoardField"] | null;
+      source_api_response: string | null;
       /**
-       * @description Optional metadata to be saved with the image
-       * @default null
+       * Cover Image
+       * @description Url for image to preview model
        */
-      metadata?: components["schemas"]["MetadataField"] | null;
+      cover_image: string | null;
       /**
-       * Id
-       * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+       * Usage Info
+       * @description Usage information for this model
        */
-      id: string;
+      usage_info: string | null;
       /**
-       * Is Intermediate
-       * @description Whether or not this is an intermediate invocation.
-       * @default false
+       * Type
+       * @default main
+       * @constant
        */
-      is_intermediate?: boolean;
+      type: "main";
       /**
-       * Use Cache
-       * @description Whether or not to use the cache
-       * @default true
+       * Trigger Phrases
+       * @description Set of trigger phrases for this model
        */
-      use_cache?: boolean;
+      trigger_phrases: string[] | null;
+      /** @description Default settings for this model */
+      default_settings: components["schemas"]["MainModelDefaultSettings"] | null;
       /**
-       * @description Image from which to get channel
-       * @default null
+       * Config Path
+       * @description Path to the config for this model, if any.
        */
-      image?: components["schemas"]["ImageField"] | null;
+      config_path: string | null;
       /**
-       * type
-       * @default invokeai_ealightness
+       * Format
+       * @default checkpoint
        * @constant
        */
-      type: "invokeai_ealightness";
-    };
-    /**
-     * Image Layer Blend
-     * @description Blend two images together, with optional opacity, mask, and blend modes. Originally created by @dwringer
-     */
-    InvokeImageBlendInvocation: {
+      format: "checkpoint";
+      prediction_type: components["schemas"]["SchedulerPredictionType"];
+      variant: components["schemas"]["ModelVariantType"];
       /**
-       * @description The board to save the image to
-       * @default null
+       * Base
+       * @default sdxl-refiner
+       * @constant
        */
-      board?: components["schemas"]["BoardField"] | null;
+      base: "sdxl-refiner";
+    };
+    /** Main_Checkpoint_SDXL_Config */
+    Main_Checkpoint_SDXL_Config: {
       /**
-       * @description Optional metadata to be saved with the image
-       * @default null
+       * Key
+       * @description A unique key for this model.
        */
-      metadata?: components["schemas"]["MetadataField"] | null;
+      key: string;
       /**
-       * Id
-       * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+       * Hash
+       * @description The hash of the model file(s).
        */
-      id: string;
+      hash: string;
       /**
-       * Is Intermediate
-       * @description Whether or not this is an intermediate invocation.
-       * @default false
+       * Path
+       * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
        */
-      is_intermediate?: boolean;
+      path: string;
       /**
-       * Use Cache
-       * @description Whether or not to use the cache
-       * @default true
+       * File Size
+       * @description The size of the model in bytes.
        */
-      use_cache?: boolean;
+      file_size: number;
       /**
-       * @description The top image to blend
-       * @default null
+       * Name
+       * @description Name of the model.
        */
-      layer_upper?: components["schemas"]["ImageField"] | null;
+      name: string;
       /**
-       * Blend Mode
-       * @description Available blend modes
-       * @default Normal
-       * @enum {string}
+       * Description
+       * @description Model description
        */
-      blend_mode?: "Normal" | "Lighten Only" | "Darken Only" | "Lighten Only (EAL)" | "Darken Only (EAL)" | "Hue" | "Saturation" | "Color" | "Luminosity" | "Linear Dodge (Add)" | "Subtract" | "Multiply" | "Divide" | "Screen" | "Overlay" | "Linear Burn" | "Difference" | "Hard Light" | "Soft Light" | "Vivid Light" | "Linear Light" | "Color Burn" | "Color Dodge";
+      description: string | null;
       /**
-       * Opacity
-       * @description Desired opacity of the upper layer
-       * @default 1
+       * Source
+       * @description The original source of the model (path, URL or repo_id).
        */
-      opacity?: number;
+      source: string;
+      /** @description The type of source */
+      source_type: components["schemas"]["ModelSourceType"];
       /**
-       * @description Optional mask, used to restrict areas from blending
-       * @default null
+       * Source Api Response
+       * @description The original API response from the source, as stringified JSON.
        */
-      mask?: components["schemas"]["ImageField"] | null;
+      source_api_response: string | null;
       /**
-       * Fit To Width
-       * @description Scale upper layer to fit base width
-       * @default false
+       * Cover Image
+       * @description Url for image to preview model
        */
-      fit_to_width?: boolean;
+      cover_image: string | null;
       /**
-       * Fit To Height
-       * @description Scale upper layer to fit base height
-       * @default true
+       * Usage Info
+       * @description Usage information for this model
        */
-      fit_to_height?: boolean;
+      usage_info: string | null;
       /**
-       * @description The bottom image to blend
-       * @default null
+       * Type
+       * @default main
+       * @constant
        */
-      layer_base?: components["schemas"]["ImageField"] | null;
+      type: "main";
       /**
-       * Color Space
-       * @description Available color spaces for blend computations
-       * @default RGB
-       * @enum {string}
+       * Trigger Phrases
+       * @description Set of trigger phrases for this model
        */
-      color_space?: "RGB" | "Linear RGB" | "HSL (RGB)" | "HSV (RGB)" | "Okhsl" | "Okhsv" | "Oklch (Oklab)" | "LCh (CIELab)";
+      trigger_phrases: string[] | null;
+      /** @description Default settings for this model */
+      default_settings: components["schemas"]["MainModelDefaultSettings"] | null;
       /**
-       * Adaptive Gamut
-       * @description Adaptive gamut clipping (0=off). Higher prioritizes chroma over lightness
-       * @default 0
+       * Config Path
+       * @description Path to the config for this model, if any.
        */
-      adaptive_gamut?: number;
+      config_path: string | null;
       /**
-       * High Precision
-       * @description Use more steps in computing gamut when possible
-       * @default true
+       * Format
+       * @default checkpoint
+       * @constant
        */
-      high_precision?: boolean;
+      format: "checkpoint";
+      prediction_type: components["schemas"]["SchedulerPredictionType"];
+      variant: components["schemas"]["ModelVariantType"];
       /**
-       * type
-       * @default invokeai_img_blend
+       * Base
+       * @default sdxl
        * @constant
        */
-      type: "invokeai_img_blend";
+      base: "sdxl";
    };
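The checkpoint and diffusers variants of these main-model configs differ in shape: checkpoint configs carry config_path, while the diffusers configs below carry repo_variant instead. Since format is a literal on both, it works as the discriminant. A sketch under the same assumptions as above; the MainSD1Config alias and describeStorage helper are illustrative only:

import type { components } from "./schema"; // assumed path to this generated file

type MainSD1Config =
  | components["schemas"]["Main_Checkpoint_SD1_Config"]
  | components["schemas"]["Main_Diffusers_SD1_Config"];

function describeStorage(config: MainSD1Config): string {
  if (config.format === "checkpoint") {
    // Narrowed: config_path exists only on the checkpoint variant.
    return `single-file checkpoint, legacy config: ${config.config_path ?? "none"}`;
  }
  // Narrowed to the diffusers variant, which exposes repo_variant instead.
  return `diffusers folder, repo variant: ${config.repo_variant}`;
}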
*/ - board?: components["schemas"]["BoardField"] | null; + key: string; /** - * @description Optional metadata to be saved with the image - * @default null + * Hash + * @description The hash of the model file(s). */ - metadata?: components["schemas"]["MetadataField"] | null; + hash: string; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. */ - id: string; + path: string; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * File Size + * @description The size of the model in bytes. */ - is_intermediate?: boolean; + file_size: number; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Name + * @description Name of the model. */ - use_cache?: boolean; + name: string; /** - * @description Image of the subject on a plain monochrome background - * @default null + * Description + * @description Model description */ - image_subject?: components["schemas"]["ImageField"] | null; + description: string | null; /** - * @description Image of a background scene - * @default null + * Source + * @description The original source of the model (path, URL or repo_id). */ - image_background?: components["schemas"]["ImageField"] | null; + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; /** - * Chroma Key - * @description Can be empty for corner flood select, or CSS-3 color or tuple - * @default + * Source Api Response + * @description The original API response from the source, as stringified JSON. */ - chroma_key?: string; + source_api_response: string | null; /** - * Threshold - * @description Subject isolation flood-fill threshold - * @default 50 + * Cover Image + * @description Url for image to preview model */ - threshold?: number; + cover_image: string | null; /** - * Fill X - * @description Scale base subject image to fit background width - * @default false + * Usage Info + * @description Usage information for this model */ - fill_x?: boolean; - /** - * Fill Y - * @description Scale base subject image to fit background height - * @default true + usage_info: string | null; + /** + * Type + * @default main + * @constant */ - fill_y?: boolean; + type: "main"; /** - * X Offset - * @description x-offset for the subject - * @default 0 + * Trigger Phrases + * @description Set of trigger phrases for this model */ - x_offset?: number; + trigger_phrases: string[] | null; + /** @description Default settings for this model */ + default_settings: components["schemas"]["MainModelDefaultSettings"] | null; /** - * Y Offset - * @description y-offset for the subject - * @default 0 + * Format + * @default diffusers + * @constant */ - y_offset?: number; + format: "diffusers"; + /** @default */ + repo_variant: components["schemas"]["ModelRepoVariant"]; /** - * type - * @default invokeai_img_composite + * Base + * @default cogview4 * @constant */ - type: "invokeai_img_composite"; + base: "cogview4"; }; - /** - * Image Dilate or Erode - * @description Dilate (expand) or erode (contract) an image. Originally created by @dwringer - */ - InvokeImageDilateOrErodeInvocation: { + /** Main_Diffusers_SD1_Config */ + Main_Diffusers_SD1_Config: { /** - * @description Optional metadata to be saved with the image - * @default null + * Key + * @description A unique key for this model. 
*/ - metadata?: components["schemas"]["MetadataField"] | null; + key: string; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Hash + * @description The hash of the model file(s). */ - id: string; + hash: string; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. */ - is_intermediate?: boolean; + path: string; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * File Size + * @description The size of the model in bytes. */ - use_cache?: boolean; + file_size: number; /** - * @description The image from which to create a mask - * @default null + * Name + * @description Name of the model. */ - image?: components["schemas"]["ImageField"] | null; + name: string; /** - * Lightness Only - * @description If true, only applies to image lightness (CIELa*b*) - * @default false + * Description + * @description Model description */ - lightness_only?: boolean; + description: string | null; /** - * Radius W - * @description Width (in pixels) by which to dilate(expand) or erode (contract) the image - * @default 4 + * Source + * @description The original source of the model (path, URL or repo_id). */ - radius_w?: number; + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; /** - * Radius H - * @description Height (in pixels) by which to dilate(expand) or erode (contract) the image - * @default 4 + * Source Api Response + * @description The original API response from the source, as stringified JSON. */ - radius_h?: number; + source_api_response: string | null; /** - * Mode - * @description How to operate on the image - * @default Dilate - * @enum {string} + * Cover Image + * @description Url for image to preview model */ - mode?: "Dilate" | "Erode"; + cover_image: string | null; /** - * type - * @default invokeai_img_dilate_erode - * @constant + * Usage Info + * @description Usage information for this model */ - type: "invokeai_img_dilate_erode"; - }; - /** - * Enhance Image - * @description Applies processing from PIL's ImageEnhance module. Originally created by @dwringer - */ - InvokeImageEnhanceInvocation: { + usage_info: string | null; /** - * @description The board to save the image to - * @default null + * Type + * @default main + * @constant */ - board?: components["schemas"]["BoardField"] | null; + type: "main"; /** - * @description Optional metadata to be saved with the image - * @default null + * Trigger Phrases + * @description Set of trigger phrases for this model */ - metadata?: components["schemas"]["MetadataField"] | null; + trigger_phrases: string[] | null; + /** @description Default settings for this model */ + default_settings: components["schemas"]["MainModelDefaultSettings"] | null; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Format + * @default diffusers + * @constant */ - id: string; + format: "diffusers"; + /** @default */ + repo_variant: components["schemas"]["ModelRepoVariant"]; + prediction_type: components["schemas"]["SchedulerPredictionType"]; + variant: components["schemas"]["ModelVariantType"]; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. 
- * @default false + * Base + * @default sd-1 + * @constant */ - is_intermediate?: boolean; + base: "sd-1"; + }; + /** Main_Diffusers_SD2_Config */ + Main_Diffusers_SD2_Config: { /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Key + * @description A unique key for this model. */ - use_cache?: boolean; + key: string; /** - * @description The image for which to apply processing - * @default null + * Hash + * @description The hash of the model file(s). */ - image?: components["schemas"]["ImageField"] | null; + hash: string; /** - * Invert - * @description Whether to invert the image colors - * @default false + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. */ - invert?: boolean; + path: string; /** - * Color - * @description Color enhancement factor - * @default 1 + * File Size + * @description The size of the model in bytes. */ - color?: number; + file_size: number; /** - * Contrast - * @description Contrast enhancement factor - * @default 1 + * Name + * @description Name of the model. */ - contrast?: number; + name: string; /** - * Brightness - * @description Brightness enhancement factor - * @default 1 + * Description + * @description Model description */ - brightness?: number; + description: string | null; /** - * Sharpness - * @description Sharpness enhancement factor - * @default 1 + * Source + * @description The original source of the model (path, URL or repo_id). */ - sharpness?: number; + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; /** - * type - * @default invokeai_img_enhance - * @constant + * Source Api Response + * @description The original API response from the source, as stringified JSON. */ - type: "invokeai_img_enhance"; - }; - /** - * Image Value Thresholds - * @description Clip image to pure black/white past specified thresholds. Originally created by @dwringer - */ - InvokeImageValueThresholdsInvocation: { + source_api_response: string | null; /** - * @description The board to save the image to - * @default null + * Cover Image + * @description Url for image to preview model */ - board?: components["schemas"]["BoardField"] | null; + cover_image: string | null; /** - * @description Optional metadata to be saved with the image - * @default null + * Usage Info + * @description Usage information for this model */ - metadata?: components["schemas"]["MetadataField"] | null; + usage_info: string | null; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Type + * @default main + * @constant */ - id: string; + type: "main"; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. 
- * @default false + * Trigger Phrases + * @description Set of trigger phrases for this model */ - is_intermediate?: boolean; + trigger_phrases: string[] | null; + /** @description Default settings for this model */ + default_settings: components["schemas"]["MainModelDefaultSettings"] | null; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Format + * @default diffusers + * @constant */ - use_cache?: boolean; + format: "diffusers"; + /** @default */ + repo_variant: components["schemas"]["ModelRepoVariant"]; + prediction_type: components["schemas"]["SchedulerPredictionType"]; + variant: components["schemas"]["ModelVariantType"]; /** - * @description The image from which to create a mask - * @default null + * Base + * @default sd-2 + * @constant */ - image?: components["schemas"]["ImageField"] | null; + base: "sd-2"; + }; + /** Main_Diffusers_SD3_Config */ + Main_Diffusers_SD3_Config: { /** - * Invert Output - * @description Make light areas dark and vice versa - * @default false + * Key + * @description A unique key for this model. */ - invert_output?: boolean; + key: string; /** - * Renormalize Values - * @description Rescale remaining values from minimum to maximum - * @default false + * Hash + * @description The hash of the model file(s). */ - renormalize_values?: boolean; + hash: string; /** - * Lightness Only - * @description If true, only applies to image lightness (CIELa*b*) - * @default false + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. */ - lightness_only?: boolean; - /** - * Threshold Upper - * @description Threshold above which will be set to full value - * @default 0.5 + path: string; + /** + * File Size + * @description The size of the model in bytes. */ - threshold_upper?: number; + file_size: number; /** - * Threshold Lower - * @description Threshold below which will be set to minimum value - * @default 0.5 + * Name + * @description Name of the model. */ - threshold_lower?: number; + name: string; /** - * type - * @default invokeai_img_val_thresholds - * @constant + * Description + * @description Model description */ - type: "invokeai_img_val_thresholds"; - }; - /** - * ItemIdsResult - * @description Response containing ordered item ids with metadata for optimistic updates. - */ - ItemIdsResult: { + description: string | null; /** - * Item Ids - * @description Ordered list of item ids + * Source + * @description The original source of the model (path, URL or repo_id). */ - item_ids: number[]; + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; /** - * Total Count - * @description Total number of queue items matching the query + * Source Api Response + * @description The original API response from the source, as stringified JSON. */ - total_count: number; - }; - /** - * IterateInvocation - * @description Iterates over a list of items - */ - IterateInvocation: { + source_api_response: string | null; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Cover Image + * @description Url for image to preview model */ - id: string; + cover_image: string | null; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. 
- * @default false + * Usage Info + * @description Usage information for this model */ - is_intermediate?: boolean; + usage_info: string | null; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Type + * @default main + * @constant */ - use_cache?: boolean; + type: "main"; /** - * Collection - * @description The list of items to iterate over - * @default [] + * Trigger Phrases + * @description Set of trigger phrases for this model */ - collection?: unknown[]; + trigger_phrases: string[] | null; + /** @description Default settings for this model */ + default_settings: components["schemas"]["MainModelDefaultSettings"] | null; /** - * Index - * @description The index, will be provided on executed iterators - * @default 0 + * Format + * @default diffusers + * @constant */ - index?: number; + format: "diffusers"; + /** @default */ + repo_variant: components["schemas"]["ModelRepoVariant"]; /** - * type - * @default iterate + * Base + * @default sd-3 * @constant */ - type: "iterate"; + base: "sd-3"; + /** + * Submodels + * @description Loadable submodels in this model + */ + submodels: { + [key: string]: components["schemas"]["SubmodelDefinition"]; + } | null; }; - /** - * IterateInvocationOutput - * @description Used to connect iteration outputs. Will be expanded to a specific output. - */ - IterateInvocationOutput: { + /** Main_Diffusers_SDXLRefiner_Config */ + Main_Diffusers_SDXLRefiner_Config: { /** - * Collection Item - * @description The item being iterated over + * Key + * @description A unique key for this model. */ - item: unknown; + key: string; /** - * Index - * @description The index of the item + * Hash + * @description The hash of the model file(s). */ - index: number; + hash: string; /** - * Total - * @description The total number of items + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. */ - total: number; + path: string; /** - * type - * @default iterate_output - * @constant + * File Size + * @description The size of the model in bytes. */ - type: "iterate_output"; - }; - JsonValue: unknown; - /** - * LaMa Infill - * @description Infills transparent areas of an image using the LaMa model - */ - LaMaInfillInvocation: { + file_size: number; /** - * @description The board to save the image to - * @default null + * Name + * @description Name of the model. */ - board?: components["schemas"]["BoardField"] | null; + name: string; /** - * @description Optional metadata to be saved with the image - * @default null + * Description + * @description Model description */ - metadata?: components["schemas"]["MetadataField"] | null; + description: string | null; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Source + * @description The original source of the model (path, URL or repo_id). */ - id: string; + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Source Api Response + * @description The original API response from the source, as stringified JSON. 
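
Note: of the per-base main configs in this hunk, only the SD3 schema exposes `submodels`. A minimal consumer-side sketch (the import path and helper name are assumptions, not part of the generated file):

```ts
import type { components } from "services/api/schema"; // assumed path to this generated file

type Sd3Config = components["schemas"]["Main_Diffusers_SD3_Config"];

// `submodels` is a nullable map of loadable parts keyed by submodel name.
function listSubmodels(config: Sd3Config): string[] {
  return config.submodels ? Object.keys(config.submodels) : [];
}
```
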
*/ - is_intermediate?: boolean; + source_api_response: string | null; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Cover Image + * @description Url for image to preview model */ - use_cache?: boolean; + cover_image: string | null; /** - * @description The image to process - * @default null + * Usage Info + * @description Usage information for this model */ - image?: components["schemas"]["ImageField"] | null; + usage_info: string | null; /** - * type - * @default infill_lama + * Type + * @default main * @constant */ - type: "infill_lama"; - }; - /** - * Latents Collection Primitive - * @description A collection of latents tensor primitive values - */ - LatentsCollectionInvocation: { + type: "main"; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Trigger Phrases + * @description Set of trigger phrases for this model */ - id: string; + trigger_phrases: string[] | null; + /** @description Default settings for this model */ + default_settings: components["schemas"]["MainModelDefaultSettings"] | null; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Format + * @default diffusers + * @constant */ - is_intermediate?: boolean; + format: "diffusers"; + /** @default */ + repo_variant: components["schemas"]["ModelRepoVariant"]; + prediction_type: components["schemas"]["SchedulerPredictionType"]; + variant: components["schemas"]["ModelVariantType"]; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Base + * @default sdxl-refiner + * @constant */ - use_cache?: boolean; + base: "sdxl-refiner"; + }; + /** Main_Diffusers_SDXL_Config */ + Main_Diffusers_SDXL_Config: { /** - * Collection - * @description The collection of latents tensors - * @default null + * Key + * @description A unique key for this model. */ - collection?: components["schemas"]["LatentsField"][] | null; + key: string; /** - * type - * @default latents_collection - * @constant + * Hash + * @description The hash of the model file(s). */ - type: "latents_collection"; - }; - /** - * LatentsCollectionOutput - * @description Base class for nodes that output a collection of latents tensors - */ - LatentsCollectionOutput: { + hash: string; /** - * Collection - * @description Latents tensor + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. */ - collection: components["schemas"]["LatentsField"][]; + path: string; /** - * type - * @default latents_collection_output - * @constant + * File Size + * @description The size of the model in bytes. */ - type: "latents_collection_output"; - }; - /** - * LatentsField - * @description A latents tensor primitive field - */ - LatentsField: { + file_size: number; /** - * Latents Name - * @description The name of the latents + * Name + * @description Name of the model. */ - latents_name: string; + name: string; /** - * Seed - * @description Seed used to generate this latents - * @default null + * Description + * @description Model description */ - seed?: number | null; - }; - /** - * Latents Primitive - * @description A latents tensor primitive value - */ - LatentsInvocation: { + description: string | null; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Source + * @description The original source of the model (path, URL or repo_id). 
*/ - id: string; + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Source Api Response + * @description The original API response from the source, as stringified JSON. */ - is_intermediate?: boolean; + source_api_response: string | null; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Cover Image + * @description Url for image to preview model */ - use_cache?: boolean; + cover_image: string | null; /** - * @description The latents tensor - * @default null + * Usage Info + * @description Usage information for this model */ - latents?: components["schemas"]["LatentsField"] | null; + usage_info: string | null; /** - * type - * @default latents + * Type + * @default main * @constant */ - type: "latents"; - }; - /** - * LatentsMetaOutput - * @description Latents + metadata - */ - LatentsMetaOutput: { - /** @description Metadata Dict */ - metadata: components["schemas"]["MetadataField"]; + type: "main"; /** - * type - * @default latents_meta_output - * @constant + * Trigger Phrases + * @description Set of trigger phrases for this model */ - type: "latents_meta_output"; - /** @description Latents tensor */ - latents: components["schemas"]["LatentsField"]; + trigger_phrases: string[] | null; + /** @description Default settings for this model */ + default_settings: components["schemas"]["MainModelDefaultSettings"] | null; /** - * Width - * @description Width of output (px) + * Format + * @default diffusers + * @constant */ - width: number; + format: "diffusers"; + /** @default */ + repo_variant: components["schemas"]["ModelRepoVariant"]; + prediction_type: components["schemas"]["SchedulerPredictionType"]; + variant: components["schemas"]["ModelVariantType"]; /** - * Height - * @description Height of output (px) + * Base + * @default sdxl + * @constant */ - height: number; + base: "sdxl"; }; - /** - * LatentsOutput - * @description Base class for nodes that output a single latents tensor - */ - LatentsOutput: { - /** @description Latents tensor */ - latents: components["schemas"]["LatentsField"]; + /** Main_ExternalAPI_ChatGPT4o_Config */ + Main_ExternalAPI_ChatGPT4o_Config: { /** - * Width - * @description Width of output (px) + * Key + * @description A unique key for this model. */ - width: number; + key: string; /** - * Height - * @description Height of output (px) + * Hash + * @description The hash of the model file(s). */ - height: number; + hash: string; /** - * type - * @default latents_output - * @constant + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. */ - type: "latents_output"; - }; - /** - * Latents to Image - SD1.5, SDXL - * @description Generates an image from latents. - */ - LatentsToImageInvocation: { + path: string; /** - * @description The board to save the image to - * @default null + * File Size + * @description The size of the model in bytes. */ - board?: components["schemas"]["BoardField"] | null; + file_size: number; /** - * @description Optional metadata to be saved with the image - * @default null + * Name + * @description Name of the model. */ - metadata?: components["schemas"]["MetadataField"] | null; + name: string; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
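
Note: each per-base config carries literal `type`, `format`, and `base` fields, so consumers can narrow with plain comparisons instead of the isinstance-style checks used server-side. A sketch under the assumption that a union of these schemas exists (the union and guard names here are hypothetical):

```ts
import type { components } from "services/api/schema"; // assumed path to this generated file

// Hypothetical union of the per-base diffusers main configs defined above.
type AnyMainDiffusersConfig =
  | components["schemas"]["Main_Diffusers_SD1_Config"]
  | components["schemas"]["Main_Diffusers_SD2_Config"]
  | components["schemas"]["Main_Diffusers_SDXL_Config"]
  | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"];

// The literal `base` field narrows the union at compile time.
function isSdxl(
  config: AnyMainDiffusersConfig,
): config is components["schemas"]["Main_Diffusers_SDXL_Config"] {
  return config.base === "sdxl";
}
```
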
+ * Description + * @description Model description */ - id: string; + description: string | null; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Source + * @description The original source of the model (path, URL or repo_id). */ - is_intermediate?: boolean; + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Source Api Response + * @description The original API response from the source, as stringified JSON. */ - use_cache?: boolean; + source_api_response: string | null; /** - * @description Latents tensor - * @default null + * Cover Image + * @description Url for image to preview model */ - latents?: components["schemas"]["LatentsField"] | null; + cover_image: string | null; /** - * @description VAE - * @default null + * Usage Info + * @description Usage information for this model */ - vae?: components["schemas"]["VAEField"] | null; + usage_info: string | null; /** - * Tiled - * @description Processing using overlapping tiles (reduce memory consumption) - * @default false + * Type + * @default main + * @constant */ - tiled?: boolean; + type: "main"; /** - * Tile Size - * @description The tile size for VAE tiling in pixels (image space). If set to 0, the default tile size for the model will be used. Larger tile sizes generally produce better results at the cost of higher memory usage. - * @default 0 + * Trigger Phrases + * @description Set of trigger phrases for this model */ - tile_size?: number; + trigger_phrases: string[] | null; + /** @description Default settings for this model */ + default_settings: components["schemas"]["MainModelDefaultSettings"] | null; /** - * Fp32 - * @description Whether or not to use full float32 precision - * @default false + * Format + * @default api + * @constant */ - fp32?: boolean; + format: "api"; /** - * type - * @default l2i + * Base + * @default chatgpt-4o * @constant */ - type: "l2i"; + base: "chatgpt-4o"; }; - /** - * Lineart Anime Edge Detection - * @description Geneartes an edge map using the Lineart model. - */ - LineartAnimeEdgeDetectionInvocation: { - /** - * @description The board to save the image to - * @default null - */ - board?: components["schemas"]["BoardField"] | null; + /** Main_ExternalAPI_FluxKontext_Config */ + Main_ExternalAPI_FluxKontext_Config: { /** - * @description Optional metadata to be saved with the image - * @default null + * Key + * @description A unique key for this model. */ - metadata?: components["schemas"]["MetadataField"] | null; + key: string; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Hash + * @description The hash of the model file(s). */ - id: string; + hash: string; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. */ - is_intermediate?: boolean; + path: string; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * File Size + * @description The size of the model in bytes. */ - use_cache?: boolean; + file_size: number; /** - * @description The image to process - * @default null + * Name + * @description Name of the model. 
*/ - image?: components["schemas"]["ImageField"] | null; + name: string; /** - * type - * @default lineart_anime_edge_detection - * @constant + * Description + * @description Model description */ - type: "lineart_anime_edge_detection"; - }; - /** - * Lineart Edge Detection - * @description Generates an edge map using the Lineart model. - */ - LineartEdgeDetectionInvocation: { + description: string | null; /** - * @description The board to save the image to - * @default null + * Source + * @description The original source of the model (path, URL or repo_id). */ - board?: components["schemas"]["BoardField"] | null; + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; /** - * @description Optional metadata to be saved with the image - * @default null + * Source Api Response + * @description The original API response from the source, as stringified JSON. */ - metadata?: components["schemas"]["MetadataField"] | null; + source_api_response: string | null; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Cover Image + * @description Url for image to preview model */ - id: string; + cover_image: string | null; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Usage Info + * @description Usage information for this model */ - is_intermediate?: boolean; + usage_info: string | null; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Type + * @default main + * @constant */ - use_cache?: boolean; + type: "main"; /** - * @description The image to process - * @default null + * Trigger Phrases + * @description Set of trigger phrases for this model */ - image?: components["schemas"]["ImageField"] | null; + trigger_phrases: string[] | null; + /** @description Default settings for this model */ + default_settings: components["schemas"]["MainModelDefaultSettings"] | null; /** - * Coarse - * @description Whether to use coarse mode - * @default false + * Format + * @default api + * @constant */ - coarse?: boolean; + format: "api"; /** - * type - * @default lineart_edge_detection + * Base + * @default flux-kontext * @constant */ - type: "lineart_edge_detection"; + base: "flux-kontext"; }; - /** - * LlavaOnevisionConfig - * @description Model config for Llava Onevision models. - */ - LlavaOnevisionConfig: { + /** Main_ExternalAPI_Gemini2_5_Config */ + Main_ExternalAPI_Gemini2_5_Config: { /** * Key * @description A unique key for this model. @@ -14037,19 +17180,10 @@ export type components = { */ name: string; /** - * Type - * @default llava_onevision - * @constant - */ - type: "llava_onevision"; - /** - * Format - * @default diffusers - * @constant + * Description + * @description Model description */ - format: "diffusers"; - /** @description The base model. */ - base: components["schemas"]["BaseModelType"]; + description: string | null; /** * Source * @description The original source of the model (path, URL or repo_id). @@ -14057,135 +17191,49 @@ export type components = { source: string; /** @description The type of source */ source_type: components["schemas"]["ModelSourceType"]; - /** - * Description - * @description Model description - */ - description?: string | null; /** * Source Api Response * @description The original API response from the source, as stringified JSON. 
*/ - source_api_response?: string | null; + source_api_response: string | null; /** * Cover Image * @description Url for image to preview model */ - cover_image?: string | null; - /** - * Submodels - * @description Loadable submodels in this model - */ - submodels?: { - [key: string]: components["schemas"]["SubmodelDefinition"]; - } | null; + cover_image: string | null; /** * Usage Info * @description Usage information for this model */ - usage_info?: string | null; - /** @default */ - repo_variant?: components["schemas"]["ModelRepoVariant"] | null; - }; - /** - * LLaVA OneVision VLLM - * @description Run a LLaVA OneVision VLLM model. - */ - LlavaOnevisionVllmInvocation: { - /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. - */ - id: string; - /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false - */ - is_intermediate?: boolean; - /** - * Use Cache - * @description Whether or not to use the cache - * @default true - */ - use_cache?: boolean; - /** - * Images - * @description Input image. - * @default null - */ - images?: (components["schemas"]["ImageField"][] | components["schemas"]["ImageField"]) | null; - /** - * Prompt - * @description Input text prompt. - * @default - */ - prompt?: string; - /** - * LLaVA Model Type - * @description The VLLM model to use - * @default null - */ - vllm_model?: components["schemas"]["ModelIdentifierField"] | null; + usage_info: string | null; /** - * type - * @default llava_onevision_vllm + * Type + * @default main * @constant */ - type: "llava_onevision_vllm"; - }; - /** - * Apply LoRA Collection - SD1.5 - * @description Applies a collection of LoRAs to the provided UNet and CLIP models. - */ - LoRACollectionLoader: { - /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. - */ - id: string; - /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false - */ - is_intermediate?: boolean; - /** - * Use Cache - * @description Whether or not to use the cache - * @default true - */ - use_cache?: boolean; - /** - * LoRAs - * @description LoRA models and weights. May be a single LoRA or collection. - * @default null - */ - loras?: components["schemas"]["LoRAField"] | components["schemas"]["LoRAField"][] | null; + type: "main"; /** - * UNet - * @description UNet (scheduler, LoRAs) - * @default null + * Trigger Phrases + * @description Set of trigger phrases for this model */ - unet?: components["schemas"]["UNetField"] | null; + trigger_phrases: string[] | null; + /** @description Default settings for this model */ + default_settings: components["schemas"]["MainModelDefaultSettings"] | null; /** - * CLIP - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null + * Format + * @default api + * @constant */ - clip?: components["schemas"]["CLIPField"] | null; + format: "api"; /** - * type - * @default lora_collection_loader + * Base + * @default gemini-2.5 * @constant */ - type: "lora_collection_loader"; + base: "gemini-2.5"; }; - /** - * LoRADiffusersConfig - * @description Model config for LoRA/Diffusers models. - */ - LoRADiffusersConfig: { + /** Main_ExternalAPI_Imagen3_Config */ + Main_ExternalAPI_Imagen3_Config: { /** * Key * @description A unique key for this model. 
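
Note: fields like `description`, `source_api_response`, and `cover_image` change from optional (`description?: string | null`) to required-but-nullable, so typed client literals must now spell the nulls out. A minimal sketch:

```ts
import type { components } from "services/api/schema"; // assumed path to this generated file

// Under the old schema these keys could be omitted; now they are required.
const stub: Pick<
  components["schemas"]["Main_ExternalAPI_Gemini2_5_Config"],
  "description" | "source_api_response" | "cover_image"
> = {
  description: null,
  source_api_response: null,
  cover_image: null,
};
```
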
@@ -14212,19 +17260,10 @@ export type components = { */ name: string; /** - * Type - * @default lora - * @constant - */ - type: "lora"; - /** - * Format - * @default diffusers - * @constant + * Description + * @description Model description */ - format: "diffusers"; - /** @description The base model. */ - base: components["schemas"]["BaseModelType"]; + description: string | null; /** * Source * @description The original source of the model (path, URL or repo_id). @@ -14232,133 +17271,132 @@ export type components = { source: string; /** @description The type of source */ source_type: components["schemas"]["ModelSourceType"]; - /** - * Description - * @description Model description - */ - description?: string | null; /** * Source Api Response * @description The original API response from the source, as stringified JSON. */ - source_api_response?: string | null; + source_api_response: string | null; /** * Cover Image * @description Url for image to preview model */ - cover_image?: string | null; - /** - * Submodels - * @description Loadable submodels in this model - */ - submodels?: { - [key: string]: components["schemas"]["SubmodelDefinition"]; - } | null; + cover_image: string | null; /** * Usage Info * @description Usage information for this model */ - usage_info?: string | null; + usage_info: string | null; + /** + * Type + * @default main + * @constant + */ + type: "main"; /** * Trigger Phrases * @description Set of trigger phrases for this model */ - trigger_phrases?: string[] | null; + trigger_phrases: string[] | null; /** @description Default settings for this model */ - default_settings?: components["schemas"]["LoraModelDefaultSettings"] | null; - }; - /** LoRAField */ - LoRAField: { - /** @description Info to load lora model */ - lora: components["schemas"]["ModelIdentifierField"]; + default_settings: components["schemas"]["MainModelDefaultSettings"] | null; /** - * Weight - * @description Weight to apply to lora model + * Format + * @default api + * @constant */ - weight: number; + format: "api"; + /** + * Base + * @default imagen3 + * @constant + */ + base: "imagen3"; }; - /** - * Apply LoRA - SD1.5 - * @description Apply selected lora to unet and text_encoder. - */ - LoRALoaderInvocation: { + /** Main_ExternalAPI_Imagen4_Config */ + Main_ExternalAPI_Imagen4_Config: { /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Key + * @description A unique key for this model. */ - id: string; + key: string; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Hash + * @description The hash of the model file(s). */ - is_intermediate?: boolean; + hash: string; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. */ - use_cache?: boolean; + path: string; /** - * LoRA - * @description LoRA model to load - * @default null + * File Size + * @description The size of the model in bytes. */ - lora?: components["schemas"]["ModelIdentifierField"] | null; + file_size: number; /** - * Weight - * @description The weight at which the LoRA is applied to each model - * @default 0.75 + * Name + * @description Name of the model. 
*/ - weight?: number; + name: string; /** - * UNet - * @description UNet (scheduler, LoRAs) - * @default null + * Description + * @description Model description */ - unet?: components["schemas"]["UNetField"] | null; + description: string | null; /** - * CLIP - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null + * Source + * @description The original source of the model (path, URL or repo_id). */ - clip?: components["schemas"]["CLIPField"] | null; + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; /** - * type - * @default lora_loader - * @constant + * Source Api Response + * @description The original API response from the source, as stringified JSON. */ - type: "lora_loader"; - }; - /** - * LoRALoaderOutput - * @description Model loader output - */ - LoRALoaderOutput: { + source_api_response: string | null; + /** + * Cover Image + * @description Url for image to preview model + */ + cover_image: string | null; + /** + * Usage Info + * @description Usage information for this model + */ + usage_info: string | null; /** - * UNet - * @description UNet (scheduler, LoRAs) - * @default null + * Type + * @default main + * @constant */ - unet: components["schemas"]["UNetField"] | null; + type: "main"; /** - * CLIP - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null + * Trigger Phrases + * @description Set of trigger phrases for this model */ - clip: components["schemas"]["CLIPField"] | null; + trigger_phrases: string[] | null; + /** @description Default settings for this model */ + default_settings: components["schemas"]["MainModelDefaultSettings"] | null; /** - * type - * @default lora_loader_output + * Format + * @default api * @constant */ - type: "lora_loader_output"; + format: "api"; + /** + * Base + * @default imagen4 + * @constant + */ + base: "imagen4"; }; /** - * LoRALyCORISConfig - * @description Model config for LoRA/Lycoris models. + * Main_GGUF_FLUX_Config + * @description Model config for main checkpoint models. */ - LoRALyCORISConfig: { + Main_GGUF_FLUX_Config: { /** * Key * @description A unique key for this model. @@ -14385,19 +17423,10 @@ export type components = { */ name: string; /** - * Type - * @default lora - * @constant - */ - type: "lora"; - /** - * Format - * @default lycoris - * @constant + * Description + * @description Model description */ - format: "lycoris"; - /** @description The base model. */ - base: components["schemas"]["BaseModelType"]; + description: string | null; /** * Source * @description The original source of the model (path, URL or repo_id). @@ -14405,142 +17434,235 @@ export type components = { source: string; /** @description The type of source */ source_type: components["schemas"]["ModelSourceType"]; - /** - * Description - * @description Model description - */ - description?: string | null; /** * Source Api Response * @description The original API response from the source, as stringified JSON. 
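
Note: the hosted-model configs (ChatGPT 4o, FLUX Kontext, Gemini 2.5, Imagen 3/4) all share the literal `format: "api"`, which gives the UI a single switch for separating them from weights on disk. A sketch with a hypothetical union and helper:

```ts
import type { components } from "services/api/schema"; // assumed path to this generated file

// Hypothetical union mixing a local config with the hosted-API ones above.
type AnyMainConfig =
  | components["schemas"]["Main_Diffusers_SDXL_Config"]
  | components["schemas"]["Main_ExternalAPI_ChatGPT4o_Config"]
  | components["schemas"]["Main_ExternalAPI_Imagen4_Config"];

// Hosted models have no meaningful local weights to convert or prune.
const isHostedModel = (config: AnyMainConfig): boolean => config.format === "api";
```
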
*/ - source_api_response?: string | null; + source_api_response: string | null; /** * Cover Image * @description Url for image to preview model */ - cover_image?: string | null; - /** - * Submodels - * @description Loadable submodels in this model - */ - submodels?: { - [key: string]: components["schemas"]["SubmodelDefinition"]; - } | null; + cover_image: string | null; /** * Usage Info * @description Usage information for this model */ - usage_info?: string | null; + usage_info: string | null; + /** + * Type + * @default main + * @constant + */ + type: "main"; /** * Trigger Phrases * @description Set of trigger phrases for this model */ - trigger_phrases?: string[] | null; + trigger_phrases: string[] | null; /** @description Default settings for this model */ - default_settings?: components["schemas"]["LoraModelDefaultSettings"] | null; + default_settings: components["schemas"]["MainModelDefaultSettings"] | null; + /** + * Config Path + * @description Path to the config for this model, if any. + */ + config_path: string | null; + /** + * Base + * @default flux + * @constant + */ + base: "flux"; + /** + * Format + * @default gguf_quantized + * @constant + */ + format: "gguf_quantized"; + variant: components["schemas"]["FluxVariantType"]; }; /** - * LoRAMetadataField - * @description LoRA Metadata Field + * Combine Masks + * @description Combine two masks together by multiplying them using `PIL.ImageChops.multiply()`. */ - LoRAMetadataField: { - /** @description LoRA model to load */ - model: components["schemas"]["ModelIdentifierField"]; + MaskCombineInvocation: { /** - * Weight - * @description The weight at which the LoRA is applied to each model + * @description The board to save the image to + * @default null */ - weight: number; - }; - /** LoRAOmiConfig */ - LoRAOmiConfig: { + board?: components["schemas"]["BoardField"] | null; /** - * Key - * @description A unique key for this model. + * @description Optional metadata to be saved with the image + * @default null */ - key: string; + metadata?: components["schemas"]["MetadataField"] | null; /** - * Hash - * @description The hash of the model file(s). + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - hash: string; + id: string; /** - * Path - * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - path: string; + is_intermediate?: boolean; /** - * File Size - * @description The size of the model in bytes. + * Use Cache + * @description Whether or not to use the cache + * @default true */ - file_size: number; + use_cache?: boolean; /** - * Name - * @description Name of the model. 
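
Note: `Main_GGUF_FLUX_Config` keeps the quantization format (`gguf_quantized`) and the FLUX variant (`FluxVariantType`) as independent fields. A small sketch (the function is illustrative only):

```ts
import type { components } from "services/api/schema"; // assumed path to this generated file

type GgufFluxConfig = components["schemas"]["Main_GGUF_FLUX_Config"];

// A loader can branch on format and variant separately when picking a code path.
function describeFluxCheckpoint(config: GgufFluxConfig): string {
  return `${config.name}: format=${config.format}, variant=${config.variant}`;
}
```
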
+ * @description The first mask to combine + * @default null */ - name: string; + mask1?: components["schemas"]["ImageField"] | null; /** - * Type - * @default lora + * @description The second mask to combine + * @default null + */ + mask2?: components["schemas"]["ImageField"] | null; + /** + * type + * @default mask_combine * @constant */ - type: "lora"; + type: "mask_combine"; + }; + /** + * Mask Edge + * @description Applies an edge mask to an image + */ + MaskEdgeInvocation: { /** - * Format - * @default omi + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * @description The image to apply the mask to + * @default null + */ + image?: components["schemas"]["ImageField"] | null; + /** + * Edge Size + * @description The size of the edge + * @default null + */ + edge_size?: number | null; + /** + * Edge Blur + * @description The amount of blur on the edge + * @default null + */ + edge_blur?: number | null; + /** + * Low Threshold + * @description First threshold for the hysteresis procedure in Canny edge detection + * @default null + */ + low_threshold?: number | null; + /** + * High Threshold + * @description Second threshold for the hysteresis procedure in Canny edge detection + * @default null + */ + high_threshold?: number | null; + /** + * type + * @default mask_edge * @constant */ - format: "omi"; - /** @description The base model. */ - base: components["schemas"]["BaseModelType"]; + type: "mask_edge"; + }; + /** + * Mask from Alpha + * @description Extracts the alpha channel of an image as a mask. + */ + MaskFromAlphaInvocation: { /** - * Source - * @description The original source of the model (path, URL or repo_id). + * @description The board to save the image to + * @default null */ - source: string; - /** @description The type of source */ - source_type: components["schemas"]["ModelSourceType"]; + board?: components["schemas"]["BoardField"] | null; /** - * Description - * @description Model description + * @description Optional metadata to be saved with the image + * @default null */ - description?: string | null; + metadata?: components["schemas"]["MetadataField"] | null; /** - * Source Api Response - * @description The original API response from the source, as stringified JSON. + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - source_api_response?: string | null; + id: string; /** - * Cover Image - * @description Url for image to preview model + * Is Intermediate + * @description Whether or not this is an intermediate invocation. 
+ * @default false */ - cover_image?: string | null; + is_intermediate?: boolean; /** - * Submodels - * @description Loadable submodels in this model + * Use Cache + * @description Whether or not to use the cache + * @default true */ - submodels?: { - [key: string]: components["schemas"]["SubmodelDefinition"]; - } | null; + use_cache?: boolean; /** - * Usage Info - * @description Usage information for this model + * @description The image to create the mask from + * @default null */ - usage_info?: string | null; + image?: components["schemas"]["ImageField"] | null; /** - * Trigger Phrases - * @description Set of trigger phrases for this model + * Invert + * @description Whether or not to invert the mask + * @default false + */ + invert?: boolean; + /** + * type + * @default tomask + * @constant */ - trigger_phrases?: string[] | null; - /** @description Default settings for this model */ - default_settings?: components["schemas"]["LoraModelDefaultSettings"] | null; + type: "tomask"; }; /** - * Select LoRA - * @description Selects a LoRA model and weight. + * Mask from Segmented Image + * @description Generate a mask for a particular color in an ID Map */ - LoRASelectorInvocation: { + MaskFromIDInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -14559,119 +17681,107 @@ export type components = { */ use_cache?: boolean; /** - * LoRA - * @description LoRA model to load + * @description The image to create the mask from * @default null */ - lora?: components["schemas"]["ModelIdentifierField"] | null; + image?: components["schemas"]["ImageField"] | null; /** - * Weight - * @description The weight at which the LoRA is applied to each model - * @default 0.75 + * @description ID color to mask + * @default null */ - weight?: number; + color?: components["schemas"]["ColorField"] | null; /** - * type - * @default lora_selector - * @constant + * Threshold + * @description Threshold for color detection + * @default 100 */ - type: "lora_selector"; - }; - /** - * LoRASelectorOutput - * @description Model loader output - */ - LoRASelectorOutput: { + threshold?: number; /** - * LoRA - * @description LoRA model and weight + * Invert + * @description Whether or not to invert the mask + * @default false */ - lora: components["schemas"]["LoRAField"]; + invert?: boolean; /** * type - * @default lora_selector_output + * @default mask_from_id * @constant */ - type: "lora_selector_output"; + type: "mask_from_id"; }; /** - * LocalModelSource - * @description A local file or directory path. + * MaskOutput + * @description A torch mask tensor. */ - LocalModelSource: { - /** Path */ - path: string; + MaskOutput: { + /** @description The mask. */ + mask: components["schemas"]["TensorField"]; /** - * Inplace - * @default false + * Width + * @description The width of the mask in pixels. */ - inplace?: boolean | null; + width: number; /** - * @description discriminator enum property added by openapi-typescript - * @enum {string} + * Height + * @description The height of the mask in pixels. 
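
Note: a graph node typed against the `MaskCombineInvocation` schema above looks like the following sketch; the node id and image names are placeholders, and `ImageField` carries only an `image_name`:

```ts
import type { components } from "services/api/schema"; // assumed path to this generated file

const combineMasks: components["schemas"]["MaskCombineInvocation"] = {
  id: "combine_masks_1", // placeholder node id
  type: "mask_combine",
  is_intermediate: true,
  mask1: { image_name: "mask_a.png" }, // placeholder image names
  mask2: { image_name: "mask_b.png" },
};
```
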
*/ - type: "local"; + height: number; + /** + * type + * @default mask_output + * @constant + */ + type: "mask_output"; }; /** - * LogLevel - * @enum {integer} + * Tensor Mask to Image + * @description Convert a mask tensor to an image. */ - LogLevel: 0 | 10 | 20 | 30 | 40 | 50; - /** LoraModelDefaultSettings */ - LoraModelDefaultSettings: { + MaskTensorToImageInvocation: { /** - * Weight - * @description Default weight for this model + * @description The board to save the image to + * @default null */ - weight?: number | null; - }; - /** MDControlListOutput */ - MDControlListOutput: { + board?: components["schemas"]["BoardField"] | null; /** - * ControlNet-List - * @description ControlNet(s) to apply + * @description Optional metadata to be saved with the image + * @default null */ - control_list: components["schemas"]["ControlField"] | components["schemas"]["ControlField"][] | null; + metadata?: components["schemas"]["MetadataField"] | null; /** - * type - * @default md_control_list_output - * @constant + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - type: "md_control_list_output"; - }; - /** MDIPAdapterListOutput */ - MDIPAdapterListOutput: { + id: string; /** - * IP-Adapter-List - * @description IP-Adapter to apply + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - ip_adapter_list: components["schemas"]["IPAdapterField"] | components["schemas"]["IPAdapterField"][] | null; + is_intermediate?: boolean; /** - * type - * @default md_ip_adapter_list_output - * @constant + * Use Cache + * @description Whether or not to use the cache + * @default true */ - type: "md_ip_adapter_list_output"; - }; - /** MDT2IAdapterListOutput */ - MDT2IAdapterListOutput: { + use_cache?: boolean; /** - * T2I Adapter-List - * @description T2I-Adapter(s) to apply + * @description The mask tensor to convert. + * @default null */ - t2i_adapter_list: components["schemas"]["T2IAdapterField"] | components["schemas"]["T2IAdapterField"][] | null; + mask?: components["schemas"]["TensorField"] | null; /** * type - * @default md_ip_adapters_output + * @default tensor_mask_to_image * @constant */ - type: "md_ip_adapters_output"; + type: "tensor_mask_to_image"; }; /** - * MLSD Detection - * @description Generates an line segment map using MLSD. + * MediaPipe Face Detection + * @description Detects faces using MediaPipe. */ - MLSDDetectionInvocation: { + MediaPipeFaceDetectionInvocation: { /** * @description The board to save the image to * @default null @@ -14705,495 +17815,573 @@ export type components = { */ image?: components["schemas"]["ImageField"] | null; /** - * Score Threshold - * @description The threshold used to score points when determining line segments - * @default 0.1 + * Max Faces + * @description Maximum number of faces to detect + * @default 1 */ - score_threshold?: number; + max_faces?: number; /** - * Distance Threshold - * @description Threshold for including a line segment - lines shorter than this distance will be discarded - * @default 20 + * Min Confidence + * @description Minimum confidence for face detection + * @default 0.5 */ - distance_threshold?: number; + min_confidence?: number; /** * type - * @default mlsd_detection + * @default mediapipe_face_detection * @constant */ - type: "mlsd_detection"; + type: "mediapipe_face_detection"; }; /** - * MainBnbQuantized4bCheckpointConfig - * @description Model config for main checkpoint models. 
+ * Metadata Merge + * @description Merges a collection of MetadataDict into a single MetadataDict. */ - MainBnbQuantized4bCheckpointConfig: { - /** - * Key - * @description A unique key for this model. - */ - key: string; - /** - * Hash - * @description The hash of the model file(s). - */ - hash: string; + MergeMetadataInvocation: { /** - * Path - * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - path: string; + id: string; /** - * File Size - * @description The size of the model in bytes. + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - file_size: number; + is_intermediate?: boolean; /** - * Name - * @description Name of the model. + * Use Cache + * @description Whether or not to use the cache + * @default true */ - name: string; + use_cache?: boolean; /** - * Type - * @default main - * @constant + * Collection + * @description Collection of Metadata + * @default null */ - type: "main"; + collection?: components["schemas"]["MetadataField"][] | null; /** - * Format - * @default bnb_quantized_nf4b + * type + * @default merge_metadata * @constant */ - format: "bnb_quantized_nf4b"; - /** @description The base model. */ - base: components["schemas"]["BaseModelType"]; - /** - * Source - * @description The original source of the model (path, URL or repo_id). - */ - source: string; - /** @description The type of source */ - source_type: components["schemas"]["ModelSourceType"]; - /** - * Description - * @description Model description - */ - description?: string | null; + type: "merge_metadata"; + }; + /** + * Merge Tiles to Image + * @description Merge multiple tile images into a single image. + */ + MergeTilesToImageInvocation: { /** - * Source Api Response - * @description The original API response from the source, as stringified JSON. + * @description The board to save the image to + * @default null */ - source_api_response?: string | null; + board?: components["schemas"]["BoardField"] | null; /** - * Cover Image - * @description Url for image to preview model + * @description Optional metadata to be saved with the image + * @default null */ - cover_image?: string | null; + metadata?: components["schemas"]["MetadataField"] | null; /** - * Submodels - * @description Loadable submodels in this model + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - submodels?: { - [key: string]: components["schemas"]["SubmodelDefinition"]; - } | null; + id: string; /** - * Usage Info - * @description Usage information for this model + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - usage_info?: string | null; + is_intermediate?: boolean; /** - * Trigger Phrases - * @description Set of trigger phrases for this model + * Use Cache + * @description Whether or not to use the cache + * @default true */ - trigger_phrases?: string[] | null; - /** @description Default settings for this model */ - default_settings?: components["schemas"]["MainModelDefaultSettings"] | null; + use_cache?: boolean; /** - * Variant - * @default normal + * Tiles With Images + * @description A list of tile images with tile properties. 
+ * @default null */ - variant?: components["schemas"]["ModelVariantType"] | components["schemas"]["ClipVariantType"] | null; + tiles_with_images?: components["schemas"]["TileWithImage"][] | null; /** - * Config Path - * @description path to the checkpoint model config file + * Blend Mode + * @description blending type Linear or Seam + * @default Seam + * @enum {string} */ - config_path: string; + blend_mode?: "Linear" | "Seam"; /** - * Converted At - * @description When this model was last converted to diffusers + * Blend Amount + * @description The amount to blend adjacent tiles in pixels. Must be <= the amount of overlap between adjacent tiles. + * @default 32 */ - converted_at?: number | null; - /** @default epsilon */ - prediction_type?: components["schemas"]["SchedulerPredictionType"]; + blend_amount?: number; /** - * Upcast Attention - * @default false + * type + * @default merge_tiles_to_image + * @constant */ - upcast_attention?: boolean; + type: "merge_tiles_to_image"; }; /** - * MainCheckpointConfig - * @description Model config for main checkpoint models. + * MetadataField + * @description Pydantic model for metadata with custom root of type dict[str, Any]. + * Metadata is stored without a strict schema. */ - MainCheckpointConfig: { + MetadataField: Record; + /** + * Metadata Field Extractor + * @description Extracts the text value from an image's metadata given a key. + * Raises an error if the image has no metadata or if the value is not a string (nesting not permitted). + */ + MetadataFieldExtractorInvocation: { /** - * Key - * @description A unique key for this model. + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - key: string; + id: string; /** - * Hash - * @description The hash of the model file(s). + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - hash: string; + is_intermediate?: boolean; /** - * Path - * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. + * Use Cache + * @description Whether or not to use the cache + * @default true */ - path: string; + use_cache?: boolean; /** - * File Size - * @description The size of the model in bytes. + * @description The image to extract metadata from + * @default null */ - file_size: number; + image?: components["schemas"]["ImageField"] | null; /** - * Name - * @description Name of the model. + * Key + * @description The key in the image's metadata to extract the value from + * @default null */ - name: string; + key?: string | null; /** - * Type - * @default main + * type + * @default metadata_field_extractor * @constant */ - type: "main"; + type: "metadata_field_extractor"; + }; + /** + * Metadata From Image + * @description Used to create a core metadata item then Add/Update it to the provided metadata + */ + MetadataFromImageInvocation: { /** - * Format - * @description Format of the provided checkpoint model - * @default checkpoint - * @enum {string} + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - format: "checkpoint" | "bnb_quantized_nf4b" | "gguf_quantized"; - /** @description The base model. */ - base: components["schemas"]["BaseModelType"]; + id: string; /** - * Source - * @description The original source of the model (path, URL or repo_id). + * Is Intermediate + * @description Whether or not this is an intermediate invocation. 
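
Note: for `MergeTilesToImageInvocation`, `blend_amount` must stay at or below the overlap used when the tiles were produced. A sketch of a node literal; in a real graph `tiles_with_images` would be wired from an upstream tiling node rather than omitted:

```ts
import type { components } from "services/api/schema"; // assumed path to this generated file

const mergeTiles: components["schemas"]["MergeTilesToImageInvocation"] = {
  id: "merge_tiles_1", // placeholder node id
  type: "merge_tiles_to_image",
  blend_mode: "Seam",
  blend_amount: 32, // must be <= the tile overlap in pixels
};
```
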
+ * @default false */ - source: string; - /** @description The type of source */ - source_type: components["schemas"]["ModelSourceType"]; + is_intermediate?: boolean; /** - * Description - * @description Model description + * Use Cache + * @description Whether or not to use the cache + * @default true */ - description?: string | null; + use_cache?: boolean; /** - * Source Api Response - * @description The original API response from the source, as stringified JSON. + * @description The image to process + * @default null */ - source_api_response?: string | null; + image?: components["schemas"]["ImageField"] | null; /** - * Cover Image - * @description Url for image to preview model + * type + * @default metadata_from_image + * @constant */ - cover_image?: string | null; + type: "metadata_from_image"; + }; + /** + * Metadata + * @description Takes a MetadataItem or collection of MetadataItems and outputs a MetadataDict. + */ + MetadataInvocation: { /** - * Submodels - * @description Loadable submodels in this model + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - submodels?: { - [key: string]: components["schemas"]["SubmodelDefinition"]; - } | null; + id: string; /** - * Usage Info - * @description Usage information for this model + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - usage_info?: string | null; + is_intermediate?: boolean; /** - * Trigger Phrases - * @description Set of trigger phrases for this model + * Use Cache + * @description Whether or not to use the cache + * @default true */ - trigger_phrases?: string[] | null; - /** @description Default settings for this model */ - default_settings?: components["schemas"]["MainModelDefaultSettings"] | null; + use_cache?: boolean; /** - * Variant - * @default normal + * Items + * @description A single metadata item or collection of metadata items + * @default null */ - variant?: components["schemas"]["ModelVariantType"] | components["schemas"]["ClipVariantType"] | null; + items?: components["schemas"]["MetadataItemField"][] | components["schemas"]["MetadataItemField"] | null; /** - * Config Path - * @description path to the checkpoint model config file + * type + * @default metadata + * @constant */ - config_path: string; + type: "metadata"; + }; + /** MetadataItemField */ + MetadataItemField: { /** - * Converted At - * @description When this model was last converted to diffusers + * Label + * @description Label for this metadata item */ - converted_at?: number | null; - /** @default epsilon */ - prediction_type?: components["schemas"]["SchedulerPredictionType"]; + label: string; /** - * Upcast Attention - * @default false + * Value + * @description The value for this metadata item (may be any type) */ - upcast_attention?: boolean; + value: unknown; }; /** - * MainDiffusersConfig - * @description Model config for main diffusers models. + * Metadata Item + * @description Used to create an arbitrary metadata item. Provide "label" and make a connection to "value" to store that data as the value. */ - MainDiffusersConfig: { + MetadataItemInvocation: { /** - * Key - * @description A unique key for this model. + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - key: string; + id: string; /** - * Hash - * @description The hash of the model file(s). + * Is Intermediate + * @description Whether or not this is an intermediate invocation. 
+ * @default false */ - hash: string; + is_intermediate?: boolean; /** - * Path - * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. + * Use Cache + * @description Whether or not to use the cache + * @default true */ - path: string; + use_cache?: boolean; /** - * File Size - * @description The size of the model in bytes. + * Label + * @description Label for this metadata item + * @default null */ - file_size: number; + label?: string | null; /** - * Name - * @description Name of the model. + * Value + * @description The value for this metadata item (may be any type) + * @default null */ - name: string; + value?: unknown | null; /** - * Type - * @default main + * type + * @default metadata_item * @constant */ - type: "main"; + type: "metadata_item"; + }; + /** + * Metadata Item Linked + * @description Used to Create/Add/Update a value into a metadata label + */ + MetadataItemLinkedInvocation: { /** - * Format - * @default diffusers - * @constant + * @description Optional metadata to be saved with the image + * @default null */ - format: "diffusers"; - /** @description The base model. */ - base: components["schemas"]["BaseModelType"]; + metadata?: components["schemas"]["MetadataField"] | null; /** - * Source - * @description The original source of the model (path, URL or repo_id). + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - source: string; - /** @description The type of source */ - source_type: components["schemas"]["ModelSourceType"]; + id: string; /** - * Description - * @description Model description + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - description?: string | null; + is_intermediate?: boolean; /** - * Source Api Response - * @description The original API response from the source, as stringified JSON. 
+ * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * Label + * @description Label for this metadata item + * @default * CUSTOM LABEL * + * @enum {string} */ - source_api_response?: string | null; + label?: "* CUSTOM LABEL *" | "positive_prompt" | "positive_style_prompt" | "negative_prompt" | "negative_style_prompt" | "width" | "height" | "seed" | "cfg_scale" | "cfg_rescale_multiplier" | "steps" | "scheduler" | "clip_skip" | "model" | "vae" | "seamless_x" | "seamless_y" | "guidance" | "cfg_scale_start_step" | "cfg_scale_end_step"; /** - * Cover Image - * @description Url for image to preview model + * Custom Label + * @description Label for this metadata item + * @default null */ - cover_image?: string | null; + custom_label?: string | null; /** - * Submodels - * @description Loadable submodels in this model + * Value + * @description The value for this metadata item (may be any type) + * @default null */ - submodels?: { - [key: string]: components["schemas"]["SubmodelDefinition"]; - } | null; + value?: unknown | null; /** - * Usage Info - * @description Usage information for this model + * type + * @default metadata_item_linked + * @constant */ - usage_info?: string | null; + type: "metadata_item_linked"; + }; + /** + * MetadataItemOutput + * @description Metadata Item Output + */ + MetadataItemOutput: { + /** @description Metadata Item */ + item: components["schemas"]["MetadataItemField"]; /** - * Trigger Phrases - * @description Set of trigger phrases for this model + * type + * @default metadata_item_output + * @constant */ - trigger_phrases?: string[] | null; - /** @description Default settings for this model */ - default_settings?: components["schemas"]["MainModelDefaultSettings"] | null; + type: "metadata_item_output"; + }; + /** MetadataOutput */ + MetadataOutput: { + /** @description Metadata Dict */ + metadata: components["schemas"]["MetadataField"]; /** - * Variant - * @default normal + * type + * @default metadata_output + * @constant */ - variant?: components["schemas"]["ModelVariantType"] | components["schemas"]["ClipVariantType"] | null; - /** @default */ - repo_variant?: components["schemas"]["ModelRepoVariant"] | null; + type: "metadata_output"; }; /** - * MainGGUFCheckpointConfig - * @description Model config for main checkpoint models. + * Metadata To Bool Collection + * @description Extracts a Boolean value Collection of a label from metadata */ - MainGGUFCheckpointConfig: { + MetadataToBoolCollectionInvocation: { /** - * Key - * @description A unique key for this model. + * @description Optional metadata to be saved with the image + * @default null */ - key: string; + metadata?: components["schemas"]["MetadataField"] | null; /** - * Hash - * @description The hash of the model file(s). + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - hash: string; + id: string; /** - * Path - * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - path: string; + is_intermediate?: boolean; /** - * File Size - * @description The size of the model in bytes. + * Use Cache + * @description Whether or not to use the cache + * @default true */ - file_size: number; + use_cache?: boolean; /** - * Name - * @description Name of the model. 
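MetadataItemLinkedInvocation introduces the "* CUSTOM LABEL *" convention reused by the metadata_to_* nodes below: pick a label from the enum directly, or select the sentinel and supply `custom_label`. A sketch (assumed "./schema" import; node ids are illustrative):

import type { components } from "./schema"; // assumed import path

type Linked = components["schemas"]["MetadataItemLinkedInvocation"];

// Store a value under a well-known label from the enum:
const known: Linked = {
  id: "link_1",
  type: "metadata_item_linked",
  label: "seed",
  value: 42,
};

// Store a value under a label the enum does not cover:
const custom: Linked = {
  id: "link_2",
  type: "metadata_item_linked",
  label: "* CUSTOM LABEL *",
  custom_label: "my_pipeline_version", // illustrative custom key
  value: "2.1.0",
};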
+ * Label + * @description Label for this metadata item + * @default * CUSTOM LABEL * + * @enum {string} */ - name: string; + label?: "* CUSTOM LABEL *" | "seamless_x" | "seamless_y"; /** - * Type - * @default main - * @constant + * Custom Label + * @description Label for this metadata item + * @default null */ - type: "main"; + custom_label?: string | null; /** - * Format - * @default gguf_quantized + * Default Value + * @description The default bool to use if not found in the metadata + * @default null + */ + default_value?: boolean[] | null; + /** + * type + * @default metadata_to_bool_collection * @constant */ - format: "gguf_quantized"; - /** @description The base model. */ - base: components["schemas"]["BaseModelType"]; + type: "metadata_to_bool_collection"; + }; + /** + * Metadata To Bool + * @description Extracts a Boolean value of a label from metadata + */ + MetadataToBoolInvocation: { /** - * Source - * @description The original source of the model (path, URL or repo_id). + * @description Optional metadata to be saved with the image + * @default null */ - source: string; - /** @description The type of source */ - source_type: components["schemas"]["ModelSourceType"]; + metadata?: components["schemas"]["MetadataField"] | null; /** - * Description - * @description Model description + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - description?: string | null; + id: string; /** - * Source Api Response - * @description The original API response from the source, as stringified JSON. + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - source_api_response?: string | null; + is_intermediate?: boolean; /** - * Cover Image - * @description Url for image to preview model + * Use Cache + * @description Whether or not to use the cache + * @default true */ - cover_image?: string | null; + use_cache?: boolean; /** - * Submodels - * @description Loadable submodels in this model + * Label + * @description Label for this metadata item + * @default * CUSTOM LABEL * + * @enum {string} */ - submodels?: { - [key: string]: components["schemas"]["SubmodelDefinition"]; - } | null; + label?: "* CUSTOM LABEL *" | "seamless_x" | "seamless_y"; /** - * Usage Info - * @description Usage information for this model + * Custom Label + * @description Label for this metadata item + * @default null */ - usage_info?: string | null; + custom_label?: string | null; /** - * Trigger Phrases - * @description Set of trigger phrases for this model + * Default Value + * @description The default bool to use if not found in the metadata + * @default null */ - trigger_phrases?: string[] | null; - /** @description Default settings for this model */ - default_settings?: components["schemas"]["MainModelDefaultSettings"] | null; + default_value?: boolean | null; /** - * Variant - * @default normal + * type + * @default metadata_to_bool + * @constant */ - variant?: components["schemas"]["ModelVariantType"] | components["schemas"]["ClipVariantType"] | null; + type: "metadata_to_bool"; + }; + /** + * Metadata To ControlNets + * @description Extracts a Controlnets value of a label from metadata + */ + MetadataToControlnetsInvocation: { /** - * Config Path - * @description path to the checkpoint model config file + * @description Optional metadata to be saved with the image + * @default null */ - config_path: string; + metadata?: components["schemas"]["MetadataField"] | null; /** - * Converted At - * 
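MetadataToBoolInvocation is the simplest of the extractors: one enum label plus a typed fallback. A sketch (assumed "./schema" import):

import type { components } from "./schema"; // assumed import path

type ToBool = components["schemas"]["MetadataToBoolInvocation"];

const readSeamlessX: ToBool = {
  id: "to_bool_1",
  type: "metadata_to_bool",
  label: "seamless_x",
  default_value: false, // used only if "seamless_x" is absent from the metadata
};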
@description When this model was last converted to diffusers + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - converted_at?: number | null; - /** @default epsilon */ - prediction_type?: components["schemas"]["SchedulerPredictionType"]; + id: string; /** - * Upcast Attention + * Is Intermediate + * @description Whether or not this is an intermediate invocation. * @default false */ - upcast_attention?: boolean; - }; - /** MainModelDefaultSettings */ - MainModelDefaultSettings: { + is_intermediate?: boolean; /** - * Vae - * @description Default VAE for this model (model key) + * Use Cache + * @description Whether or not to use the cache + * @default true */ - vae?: string | null; + use_cache?: boolean; /** - * Vae Precision - * @description Default VAE precision for this model + * ControlNet-List + * @default null */ - vae_precision?: ("fp16" | "fp32") | null; + control_list?: components["schemas"]["ControlField"] | components["schemas"]["ControlField"][] | null; /** - * Scheduler - * @description Default scheduler for this model + * type + * @default metadata_to_controlnets + * @constant */ - scheduler?: ("ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_3m" | "dpmpp_3m_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd") | null; + type: "metadata_to_controlnets"; + }; + /** + * Metadata To Float Collection + * @description Extracts a Float value Collection of a label from metadata + */ + MetadataToFloatCollectionInvocation: { /** - * Steps - * @description Default number of steps for this model + * @description Optional metadata to be saved with the image + * @default null */ - steps?: number | null; + metadata?: components["schemas"]["MetadataField"] | null; /** - * Cfg Scale - * @description Default CFG Scale for this model + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - cfg_scale?: number | null; + id: string; /** - * Cfg Rescale Multiplier - * @description Default CFG Rescale Multiplier for this model + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - cfg_rescale_multiplier?: number | null; + is_intermediate?: boolean; /** - * Width - * @description Default width for this model + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * Label + * @description Label for this metadata item + * @default * CUSTOM LABEL * + * @enum {string} + */ + label?: "* CUSTOM LABEL *" | "cfg_scale" | "cfg_rescale_multiplier" | "guidance"; + /** + * Custom Label + * @description Label for this metadata item + * @default null */ - width?: number | null; + custom_label?: string | null; /** - * Height - * @description Default height for this model + * Default Value + * @description The default float to use if not found in the metadata + * @default null */ - height?: number | null; + default_value?: number[] | null; /** - * Guidance - * @description Default Guidance for this model + * type + * @default metadata_to_float_collection + * @constant */ - guidance?: number | null; + type: "metadata_to_float_collection"; }; /** - * Main Model - SD1.5, SD2 - * @description Loads a main model, outputting its submodels. 
+ * Metadata To Float + * @description Extracts a Float value of a label from metadata */ - MainModelLoaderInvocation: { + MetadataToFloatInvocation: { + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -15212,27 +18400,36 @@ export type components = { */ use_cache?: boolean; /** - * @description Main model (UNet, VAE, CLIP) to load + * Label + * @description Label for this metadata item + * @default * CUSTOM LABEL * + * @enum {string} + */ + label?: "* CUSTOM LABEL *" | "cfg_scale" | "cfg_rescale_multiplier" | "guidance"; + /** + * Custom Label + * @description Label for this metadata item * @default null */ - model?: components["schemas"]["ModelIdentifierField"] | null; + custom_label?: string | null; + /** + * Default Value + * @description The default float to use if not found in the metadata + * @default null + */ + default_value?: number | null; /** * type - * @default main_model_loader + * @default metadata_to_float * @constant */ - type: "main_model_loader"; + type: "metadata_to_float"; }; /** - * Combine Masks - * @description Combine two masks together by multiplying them using `PIL.ImageChops.multiply()`. + * Metadata To IP-Adapters + * @description Extracts a IP-Adapters value of a label from metadata */ - MaskCombineInvocation: { - /** - * @description The board to save the image to - * @default null - */ - board?: components["schemas"]["BoardField"] | null; + MetadataToIPAdaptersInvocation: { /** * @description Optional metadata to be saved with the image * @default null @@ -15256,32 +18453,23 @@ export type components = { */ use_cache?: boolean; /** - * @description The first mask to combine - * @default null - */ - mask1?: components["schemas"]["ImageField"] | null; - /** - * @description The second image to combine + * IP-Adapter-List + * @description IP-Adapter to apply * @default null */ - mask2?: components["schemas"]["ImageField"] | null; + ip_adapter_list?: components["schemas"]["IPAdapterField"] | components["schemas"]["IPAdapterField"][] | null; /** * type - * @default mask_combine + * @default metadata_to_ip_adapters * @constant */ - type: "mask_combine"; + type: "metadata_to_ip_adapters"; }; /** - * Mask Edge - * @description Applies an edge mask to an image + * Metadata To Integer Collection + * @description Extracts an integer value Collection of a label from metadata */ - MaskEdgeInvocation: { - /** - * @description The board to save the image to - * @default null - */ - board?: components["schemas"]["BoardField"] | null; + MetadataToIntegerCollectionInvocation: { /** * @description Optional metadata to be saved with the image * @default null @@ -15305,51 +18493,36 @@ export type components = { */ use_cache?: boolean; /** - * @description The image to apply the mask to - * @default null - */ - image?: components["schemas"]["ImageField"] | null; - /** - * Edge Size - * @description The size of the edge - * @default null - */ - edge_size?: number | null; - /** - * Edge Blur - * @description The amount of blur on the edge - * @default null + * Label + * @description Label for this metadata item + * @default * CUSTOM LABEL * + * @enum {string} */ - edge_blur?: number | null; + label?: "* CUSTOM LABEL *" | "width" | "height" | "seed" | "steps" | "clip_skip" | "cfg_scale_start_step" | "cfg_scale_end_step"; /** - * Low Threshold - * @description First 
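The float extractor shown above mirrors the bool one, swapping the label enum and the fallback's type. A sketch (assumed "./schema" import):

import type { components } from "./schema"; // assumed import path

type ToFloat = components["schemas"]["MetadataToFloatInvocation"];

const readGuidance: ToFloat = {
  id: "to_float_1",
  type: "metadata_to_float",
  label: "guidance", // one of the enum labels in the schema above
  default_value: 3.5, // fallback when the metadata has no "guidance" entry
};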
threshold for the hysteresis procedure in Canny edge detection + * Custom Label + * @description Label for this metadata item * @default null */ - low_threshold?: number | null; + custom_label?: string | null; /** - * High Threshold - * @description Second threshold for the hysteresis procedure in Canny edge detection + * Default Value + * @description The default integer to use if not found in the metadata * @default null */ - high_threshold?: number | null; + default_value?: number[] | null; /** * type - * @default mask_edge + * @default metadata_to_integer_collection * @constant */ - type: "mask_edge"; + type: "metadata_to_integer_collection"; }; /** - * Mask from Alpha - * @description Extracts the alpha channel of an image as a mask. + * Metadata To Integer + * @description Extracts an integer value of a label from metadata */ - MaskFromAlphaInvocation: { - /** - * @description The board to save the image to - * @default null - */ - board?: components["schemas"]["BoardField"] | null; + MetadataToIntegerInvocation: { /** * @description Optional metadata to be saved with the image * @default null @@ -15373,33 +18546,36 @@ export type components = { */ use_cache?: boolean; /** - * @description The image to create the mask from + * Label + * @description Label for this metadata item + * @default * CUSTOM LABEL * + * @enum {string} + */ + label?: "* CUSTOM LABEL *" | "width" | "height" | "seed" | "steps" | "clip_skip" | "cfg_scale_start_step" | "cfg_scale_end_step"; + /** + * Custom Label + * @description Label for this metadata item * @default null */ - image?: components["schemas"]["ImageField"] | null; + custom_label?: string | null; /** - * Invert - * @description Whether or not to invert the mask - * @default false + * Default Value + * @description The default integer to use if not found in the metadata + * @default null */ - invert?: boolean; + default_value?: number | null; /** * type - * @default tomask + * @default metadata_to_integer * @constant */ - type: "tomask"; + type: "metadata_to_integer"; }; /** - * Mask from Segmented Image - * @description Generate a mask for a particular color in an ID Map + * Metadata To LoRA Collection + * @description Extracts Lora(s) from metadata into a collection */ - MaskFromIDInvocation: { - /** - * @description The board to save the image to - * @default null - */ - board?: components["schemas"]["BoardField"] | null; + MetadataToLorasCollectionInvocation: { /** * @description Optional metadata to be saved with the image * @default null @@ -15423,68 +18599,46 @@ export type components = { */ use_cache?: boolean; /** - * @description The image to create the mask from - * @default null - */ - image?: components["schemas"]["ImageField"] | null; - /** - * @description ID color to mask - * @default null - */ - color?: components["schemas"]["ColorField"] | null; - /** - * Threshold - * @description Threshold for color detection - * @default 100 + * Custom Label + * @description Label for this metadata item + * @default loras */ - threshold?: number; + custom_label?: string; /** - * Invert - * @description Whether or not to invert the mask - * @default false + * LoRAs + * @description LoRA models and weights. May be a single LoRA or collection. 
+ * @default [] */ - invert?: boolean; + loras?: components["schemas"]["LoRAField"] | components["schemas"]["LoRAField"][] | null; /** * type - * @default mask_from_id + * @default metadata_to_lora_collection * @constant */ - type: "mask_from_id"; + type: "metadata_to_lora_collection"; }; /** - * MaskOutput - * @description A torch mask tensor. + * MetadataToLorasCollectionOutput + * @description Model loader output */ - MaskOutput: { - /** @description The mask. */ - mask: components["schemas"]["TensorField"]; - /** - * Width - * @description The width of the mask in pixels. - */ - width: number; + MetadataToLorasCollectionOutput: { /** - * Height - * @description The height of the mask in pixels. + * LoRAs + * @description Collection of LoRA model and weights */ - height: number; + lora: components["schemas"]["LoRAField"][]; /** * type - * @default mask_output + * @default metadata_to_lora_collection_output * @constant */ - type: "mask_output"; + type: "metadata_to_lora_collection_output"; }; /** - * Tensor Mask to Image - * @description Convert a mask tensor to an image. + * Metadata To LoRAs + * @description Extracts a Loras value of a label from metadata */ - MaskTensorToImageInvocation: { - /** - * @description The board to save the image to - * @default null - */ - board?: components["schemas"]["BoardField"] | null; + MetadataToLorasInvocation: { /** * @description Optional metadata to be saved with the image * @default null @@ -15508,27 +18662,29 @@ export type components = { */ use_cache?: boolean; /** - * @description The mask tensor to convert. + * UNet + * @description UNet (scheduler, LoRAs) * @default null */ - mask?: components["schemas"]["TensorField"] | null; + unet?: components["schemas"]["UNetField"] | null; /** - * type - * @default tensor_mask_to_image - * @constant + * CLIP + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null */ - type: "tensor_mask_to_image"; - }; - /** - * MediaPipe Face Detection - * @description Detects faces using MediaPipe. 
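MetadataToLorasCollectionOutput's `lora` field is always an array, even when the node's `loras` input was a single LoRAField, which keeps downstream consumers uniform. A sketch that additionally assumes LoRAField carries a numeric `weight` (its shape is not part of this hunk):

import type { components } from "./schema"; // assumed import path

type LorasOut = components["schemas"]["MetadataToLorasCollectionOutput"];

// Sum the weights of every extracted LoRA; `weight: number` is an assumption
// about LoRAField, which is defined elsewhere in the generated file.
const totalWeight = (out: LorasOut): number =>
  out.lora.reduce((sum, l) => sum + l.weight, 0);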
- */ - MediaPipeFaceDetectionInvocation: { + clip?: components["schemas"]["CLIPField"] | null; /** - * @description The board to save the image to - * @default null + * type + * @default metadata_to_loras + * @constant */ - board?: components["schemas"]["BoardField"] | null; + type: "metadata_to_loras"; + }; + /** + * Metadata To Model + * @description Extracts a Model value of a label from metadata + */ + MetadataToModelInvocation: { /** * @description Optional metadata to be saved with the image * @default null @@ -15552,74 +18708,72 @@ export type components = { */ use_cache?: boolean; /** - * @description The image to process - * @default null + * Label + * @description Label for this metadata item + * @default model + * @enum {string} */ - image?: components["schemas"]["ImageField"] | null; + label?: "* CUSTOM LABEL *" | "model"; /** - * Max Faces - * @description Maximum number of faces to detect - * @default 1 + * Custom Label + * @description Label for this metadata item + * @default null */ - max_faces?: number; + custom_label?: string | null; /** - * Min Confidence - * @description Minimum confidence for face detection - * @default 0.5 + * @description The default model to use if not found in the metadata + * @default null */ - min_confidence?: number; + default_value?: components["schemas"]["ModelIdentifierField"] | null; /** * type - * @default mediapipe_face_detection + * @default metadata_to_model * @constant */ - type: "mediapipe_face_detection"; + type: "metadata_to_model"; }; /** - * Metadata Merge - * @description Merged a collection of MetadataDict into a single MetadataDict. + * MetadataToModelOutput + * @description String to main model output */ - MergeMetadataInvocation: { + MetadataToModelOutput: { /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Model + * @description Main model (UNet, VAE, CLIP) to load */ - id: string; + model: components["schemas"]["ModelIdentifierField"]; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Name + * @description Model Name */ - is_intermediate?: boolean; + name: string; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * UNet + * @description UNet (scheduler, LoRAs) */ - use_cache?: boolean; + unet: components["schemas"]["UNetField"]; /** - * Collection - * @description Collection of Metadata - * @default null + * VAE + * @description VAE */ - collection?: components["schemas"]["MetadataField"][] | null; + vae: components["schemas"]["VAEField"]; + /** + * CLIP + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + */ + clip: components["schemas"]["CLIPField"]; /** * type - * @default merge_metadata + * @default metadata_to_model_output * @constant */ - type: "merge_metadata"; + type: "metadata_to_model_output"; }; /** - * Merge Tiles to Image - * @description Merge multiple tile images into a single image. 
+ * Metadata To SDXL LoRAs + * @description Extracts a SDXL Loras value of a label from metadata */ - MergeTilesToImageInvocation: { - /** - * @description The board to save the image to - * @default null - */ - board?: components["schemas"]["BoardField"] | null; + MetadataToSDXLLorasInvocation: { /** * @description Optional metadata to be saved with the image * @default null @@ -15643,43 +18797,40 @@ export type components = { */ use_cache?: boolean; /** - * Tiles With Images - * @description A list of tile images with tile properties. + * UNet + * @description UNet (scheduler, LoRAs) * @default null */ - tiles_with_images?: components["schemas"]["TileWithImage"][] | null; + unet?: components["schemas"]["UNetField"] | null; /** - * Blend Mode - * @description blending type Linear or Seam - * @default Seam - * @enum {string} + * CLIP 1 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null */ - blend_mode?: "Linear" | "Seam"; + clip?: components["schemas"]["CLIPField"] | null; /** - * Blend Amount - * @description The amount to blend adjacent tiles in pixels. Must be <= the amount of overlap between adjacent tiles. - * @default 32 + * CLIP 2 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null */ - blend_amount?: number; + clip2?: components["schemas"]["CLIPField"] | null; /** * type - * @default merge_tiles_to_image + * @default metadata_to_sdlx_loras * @constant */ - type: "merge_tiles_to_image"; + type: "metadata_to_sdlx_loras"; }; /** - * MetadataField - * @description Pydantic model for metadata with custom root of type dict[str, Any]. - * Metadata is stored without a strict schema. - */ - MetadataField: Record; - /** - * Metadata Field Extractor - * @description Extracts the text value from an image's metadata given a key. - * Raises an error if the image has no metadata or if the value is not a string (nesting not permitted). + * Metadata To SDXL Model + * @description Extracts a SDXL Model value of a label from metadata */ - MetadataFieldExtractorInvocation: { + MetadataToSDXLModelInvocation: { + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
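Note that the generated constant really is "metadata_to_sdlx_loras"; the transposition presumably comes from the upstream invocation name, which the schema reproduces faithfully, so clients must match it exactly. A sketch (assumed "./schema" import):

import type { components } from "./schema"; // assumed import path

type SDXLLoras = components["schemas"]["MetadataToSDXLLorasInvocation"];

const node: SDXLLoras = {
  id: "sdxl_loras_1",
  type: "metadata_to_sdlx_loras", // exact generated constant, "sdlx" included
  unet: null, // usually populated via graph edges rather than literals
  clip: null,
  clip2: null, // SDXL threads a second CLIP through the node
};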
@@ -15698,62 +18849,82 @@ export type components = { */ use_cache?: boolean; /** - * @description The image to extract metadata from + * Label + * @description Label for this metadata item + * @default model + * @enum {string} + */ + label?: "* CUSTOM LABEL *" | "model"; + /** + * Custom Label + * @description Label for this metadata item * @default null */ - image?: components["schemas"]["ImageField"] | null; + custom_label?: string | null; /** - * Key - * @description The key in the image's metadata to extract the value from + * @description The default SDXL Model to use if not found in the metadata * @default null */ - key?: string | null; + default_value?: components["schemas"]["ModelIdentifierField"] | null; /** * type - * @default metadata_field_extractor + * @default metadata_to_sdxl_model * @constant */ - type: "metadata_field_extractor"; + type: "metadata_to_sdxl_model"; }; /** - * Metadata From Image - * @description Used to create a core metadata item then Add/Update it to the provided metadata + * MetadataToSDXLModelOutput + * @description String to SDXL main model output */ - MetadataFromImageInvocation: { + MetadataToSDXLModelOutput: { /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Model + * @description Main model (UNet, VAE, CLIP) to load */ - id: string; + model: components["schemas"]["ModelIdentifierField"]; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Name + * @description Model Name */ - is_intermediate?: boolean; + name: string; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * UNet + * @description UNet (scheduler, LoRAs) */ - use_cache?: boolean; + unet: components["schemas"]["UNetField"]; /** - * @description The image to process - * @default null + * CLIP 1 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - image?: components["schemas"]["ImageField"] | null; + clip: components["schemas"]["CLIPField"]; + /** + * CLIP 2 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + */ + clip2: components["schemas"]["CLIPField"]; + /** + * VAE + * @description VAE + */ + vae: components["schemas"]["VAEField"]; /** * type - * @default metadata_from_image + * @default metadata_to_sdxl_model_output * @constant */ - type: "metadata_from_image"; + type: "metadata_to_sdxl_model_output"; }; /** - * Metadata - * @description Takes a MetadataItem or collection of MetadataItems and outputs a MetadataDict. + * Metadata To Scheduler + * @description Extracts a Scheduler value of a label from metadata */ - MetadataInvocation: { + MetadataToSchedulerInvocation: { + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
@@ -15772,36 +18943,42 @@ export type components = { */ use_cache?: boolean; /** - * Items - * @description A single metadata item or collection of metadata items - * @default null + * Label + * @description Label for this metadata item + * @default scheduler + * @enum {string} */ - items?: components["schemas"]["MetadataItemField"][] | components["schemas"]["MetadataItemField"] | null; + label?: "* CUSTOM LABEL *" | "scheduler"; /** - * type - * @default metadata - * @constant + * Custom Label + * @description Label for this metadata item + * @default null */ - type: "metadata"; - }; - /** MetadataItemField */ - MetadataItemField: { + custom_label?: string | null; /** - * Label - * @description Label for this metadata item + * Default Value + * @description The default scheduler to use if not found in the metadata + * @default euler + * @enum {string} */ - label: string; + default_value?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_3m" | "dpmpp_3m_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd"; /** - * Value - * @description The value for this metadata item (may be any type) + * type + * @default metadata_to_scheduler + * @constant */ - value: unknown; + type: "metadata_to_scheduler"; }; /** - * Metadata Item - * @description Used to create an arbitrary metadata item. Provide "label" and make a connection to "value" to store that data as the value. + * Metadata To String Collection + * @description Extracts a string collection value of a label from metadata */ - MetadataItemInvocation: { + MetadataToStringCollectionInvocation: { + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
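Because `default_value` on the scheduler extractor above reuses the full scheduler enum, an invalid scheduler string fails at compile time rather than at graph execution. A sketch (assumed "./schema" import):

import type { components } from "./schema"; // assumed import path

type ToScheduler = components["schemas"]["MetadataToSchedulerInvocation"];

const node: ToScheduler = {
  id: "to_sched_1",
  type: "metadata_to_scheduler",
  label: "scheduler",
  default_value: "dpmpp_2m_k", // must be a member of the scheduler enum
};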
@@ -15822,27 +18999,34 @@ export type components = { /** * Label * @description Label for this metadata item + * @default * CUSTOM LABEL * + * @enum {string} + */ + label?: "* CUSTOM LABEL *" | "positive_prompt" | "positive_style_prompt" | "negative_prompt" | "negative_style_prompt"; + /** + * Custom Label + * @description Label for this metadata item * @default null */ - label?: string | null; + custom_label?: string | null; /** - * Value - * @description The value for this metadata item (may be any type) + * Default Value + * @description The default string collection to use if not found in the metadata * @default null */ - value?: unknown | null; + default_value?: string[] | null; /** * type - * @default metadata_item + * @default metadata_to_string_collection * @constant */ - type: "metadata_item"; + type: "metadata_to_string_collection"; }; /** - * Metadata Item Linked - * @description Used to Create/Add/Update a value into a metadata label + * Metadata To String + * @description Extracts a string value of a label from metadata */ - MetadataItemLinkedInvocation: { + MetadataToStringInvocation: { /** * @description Optional metadata to be saved with the image * @default null @@ -15871,7 +19055,7 @@ export type components = { * @default * CUSTOM LABEL * * @enum {string} */ - label?: "* CUSTOM LABEL *" | "positive_prompt" | "positive_style_prompt" | "negative_prompt" | "negative_style_prompt" | "width" | "height" | "seed" | "cfg_scale" | "cfg_rescale_multiplier" | "steps" | "scheduler" | "clip_skip" | "model" | "vae" | "seamless_x" | "seamless_y" | "guidance" | "cfg_scale_start_step" | "cfg_scale_end_step"; + label?: "* CUSTOM LABEL *" | "positive_prompt" | "positive_style_prompt" | "negative_prompt" | "negative_style_prompt"; /** * Custom Label * @description Label for this metadata item @@ -15879,48 +19063,63 @@ export type components = { */ custom_label?: string | null; /** - * Value - * @description The value for this metadata item (may be any type) + * Default Value + * @description The default string to use if not found in the metadata * @default null */ - value?: unknown | null; + default_value?: string | null; /** * type - * @default metadata_item_linked + * @default metadata_to_string * @constant */ - type: "metadata_item_linked"; + type: "metadata_to_string"; }; /** - * MetadataItemOutput - * @description Metadata Item Output + * Metadata To T2I-Adapters + * @description Extracts a T2I-Adapters value of a label from metadata */ - MetadataItemOutput: { - /** @description Metadata Item */ - item: components["schemas"]["MetadataItemField"]; + MetadataToT2IAdaptersInvocation: { /** - * type - * @default metadata_item_output - * @constant + * @description Optional metadata to be saved with the image + * @default null */ - type: "metadata_item_output"; - }; - /** MetadataOutput */ - MetadataOutput: { - /** @description Metadata Dict */ - metadata: components["schemas"]["MetadataField"]; + metadata?: components["schemas"]["MetadataField"] | null; + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. 
+ * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * T2I-Adapter + * @description IP-Adapter to apply + * @default null + */ + t2i_adapter_list?: components["schemas"]["T2IAdapterField"] | components["schemas"]["T2IAdapterField"][] | null; /** * type - * @default metadata_output + * @default metadata_to_t2i_adapters * @constant */ - type: "metadata_output"; + type: "metadata_to_t2i_adapters"; }; /** - * Metadata To Bool Collection - * @description Extracts a Boolean value Collection of a label from metadata + * Metadata To VAE + * @description Extracts a VAE value of a label from metadata */ - MetadataToBoolCollectionInvocation: { + MetadataToVAEInvocation: { /** * @description Optional metadata to be saved with the image * @default null @@ -15946,10 +19145,10 @@ export type components = { /** * Label * @description Label for this metadata item - * @default * CUSTOM LABEL * + * @default vae * @enum {string} */ - label?: "* CUSTOM LABEL *" | "seamless_x" | "seamless_y"; + label?: "* CUSTOM LABEL *" | "vae"; /** * Custom Label * @description Label for this metadata item @@ -15957,28 +19156,57 @@ export type components = { */ custom_label?: string | null; /** - * Default Value - * @description The default bool to use if not found in the metadata + * @description The default VAE to use if not found in the metadata * @default null */ - default_value?: boolean[] | null; + default_value?: components["schemas"]["VAEField"] | null; /** * type - * @default metadata_to_bool_collection + * @default metadata_to_vae * @constant */ - type: "metadata_to_bool_collection"; + type: "metadata_to_vae"; }; /** - * Metadata To Bool - * @description Extracts a Boolean value of a label from metadata + * ModelFormat + * @description Storage format of model. + * @enum {string} */ - MetadataToBoolInvocation: { + ModelFormat: "omi" | "diffusers" | "checkpoint" | "lycoris" | "onnx" | "olive" | "embedding_file" | "embedding_folder" | "invokeai" | "t5_encoder" | "bnb_quantized_int8b" | "bnb_quantized_nf4b" | "gguf_quantized" | "api" | "unknown"; + /** ModelIdentifierField */ + ModelIdentifierField: { /** - * @description Optional metadata to be saved with the image + * Key + * @description The model's unique key + */ + key: string; + /** + * Hash + * @description The model's BLAKE3 hash + */ + hash: string; + /** + * Name + * @description The model's name + */ + name: string; + /** @description The model's base model type */ + base: components["schemas"]["BaseModelType"]; + /** @description The model's type */ + type: components["schemas"]["ModelType"]; + /** + * @description The submodel to load, if this is a main model * @default null */ - metadata?: components["schemas"]["MetadataField"] | null; + submodel_type?: components["schemas"]["SubModelType"] | null; + }; + /** + * Any Model + * @description Selects any model, outputting it its identifier. Be careful with this one! The identifier will be accepted as + * input for any model, even if the model types don't match. If you connect this to a mismatched input, you'll get an + * error. + */ + ModelIdentifierInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
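ModelIdentifierField, defined above, is the handle the loader nodes exchange: `base` and `type` determine which inputs will accept it, and `submodel_type` is only meaningful for main models. A sketch (assumed "./schema" import; the key and hash values are illustrative, and the "sdxl"/"main"/"unet" literals follow InvokeAI's taxonomy enums, which are defined elsewhere in the generated file):

import type { components } from "./schema"; // assumed import path

type ModelId = components["schemas"]["ModelIdentifierField"];

const sdxlUnet: ModelId = {
  key: "b1c2d3",            // illustrative record key, not a real model
  hash: "blake3:abc123",    // illustrative BLAKE3 hash
  name: "Juggernaut XL",
  base: "sdxl",
  type: "main",
  submodel_type: "unet",    // only meaningful when type is "main"
};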
@@ -15997,395 +19225,539 @@ export type components = { */ use_cache?: boolean; /** - * Label - * @description Label for this metadata item - * @default * CUSTOM LABEL * - * @enum {string} + * Model + * @description The model to select + * @default null + */ + model?: components["schemas"]["ModelIdentifierField"] | null; + /** + * type + * @default model_identifier + * @constant + */ + type: "model_identifier"; + }; + /** + * ModelIdentifierOutput + * @description Model identifier output + */ + ModelIdentifierOutput: { + /** + * Model + * @description Model identifier + */ + model: components["schemas"]["ModelIdentifierField"]; + /** + * type + * @default model_identifier_output + * @constant + */ + type: "model_identifier_output"; + }; + /** + * ModelInstallCancelledEvent + * @description Event model for model_install_cancelled + */ + ModelInstallCancelledEvent: { + /** + * Timestamp + * @description The timestamp of the event + */ + timestamp: number; + /** + * Id + * @description The ID of the install job + */ + id: number; + /** + * Source + * @description Source of the model; local path, repo_id or url + */ + source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"]; + }; + /** + * ModelInstallCompleteEvent + * @description Event model for model_install_complete + */ + ModelInstallCompleteEvent: { + /** + * Timestamp + * @description The timestamp of the event + */ + timestamp: number; + /** + * Id + * @description The ID of the install job + */ + id: number; + /** + * Source + * @description Source of the model; local path, repo_id or url + */ + source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"]; + /** + * Key + * @description Model config record key + */ + key: string; + /** + * Total Bytes + * @description Size of the model (may be None for installation of a local path) + */ + total_bytes: number | null; + /** + * Config + * @description The installed model's config + */ + config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | 
components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Main_ExternalAPI_ChatGPT4o_Config"] | components["schemas"]["Main_ExternalAPI_Gemini2_5_Config"] | components["schemas"]["Main_ExternalAPI_Imagen3_Config"] | components["schemas"]["Main_ExternalAPI_Imagen4_Config"] | components["schemas"]["Main_ExternalAPI_FluxKontext_Config"] | components["schemas"]["Video_ExternalAPI_Veo3_Config"] | components["schemas"]["Video_ExternalAPI_Runway_Config"] | components["schemas"]["Unknown_Config"]; + }; + /** + * ModelInstallDownloadProgressEvent + * @description Event model for model_install_download_progress + */ + ModelInstallDownloadProgressEvent: { + /** + * Timestamp + * @description The timestamp of the event + */ + timestamp: number; + /** + * Id + * @description The ID of the install job + */ + id: number; + /** + * Source + * @description Source of the model; local path, repo_id or url + */ + source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"]; + /** + * Local Path + * @description Where model is downloading to */ - label?: "* CUSTOM LABEL *" | "seamless_x" | "seamless_y"; + local_path: string; /** - * Custom Label - * @description Label for this metadata item - * @default null + * Bytes + * @description Number of bytes downloaded so far */ - custom_label?: string | null; + bytes: number; /** - * Default Value - * @description The default bool to use if not found in the metadata - * @default null + * Total Bytes + * @description Total size of download, including all files */ - default_value?: boolean | null; + total_bytes: number; /** - * 
type - * @default metadata_to_bool - * @constant + * Parts + * @description Progress of downloading URLs that comprise the model, if any */ - type: "metadata_to_bool"; + parts: { + [key: string]: number | string; + }[]; }; /** - * Metadata To ControlNets - * @description Extracts a Controlnets value of a label from metadata + * ModelInstallDownloadStartedEvent + * @description Event model for model_install_download_started */ - MetadataToControlnetsInvocation: { + ModelInstallDownloadStartedEvent: { /** - * @description Optional metadata to be saved with the image - * @default null + * Timestamp + * @description The timestamp of the event */ - metadata?: components["schemas"]["MetadataField"] | null; + timestamp: number; /** * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * @description The ID of the install job */ - id: string; + id: number; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Source + * @description Source of the model; local path, repo_id or url */ - is_intermediate?: boolean; + source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"]; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Local Path + * @description Where model is downloading to */ - use_cache?: boolean; + local_path: string; /** - * ControlNet-List - * @default null + * Bytes + * @description Number of bytes downloaded so far */ - control_list?: components["schemas"]["ControlField"] | components["schemas"]["ControlField"][] | null; + bytes: number; /** - * type - * @default metadata_to_controlnets - * @constant + * Total Bytes + * @description Total size of download, including all files */ - type: "metadata_to_controlnets"; + total_bytes: number; + /** + * Parts + * @description Progress of downloading URLs that comprise the model, if any + */ + parts: { + [key: string]: number | string; + }[]; }; /** - * Metadata To Float Collection - * @description Extracts a Float value Collection of a label from metadata + * ModelInstallDownloadsCompleteEvent + * @description Emitted once when an install job becomes active. */ - MetadataToFloatCollectionInvocation: { + ModelInstallDownloadsCompleteEvent: { /** - * @description Optional metadata to be saved with the image - * @default null + * Timestamp + * @description The timestamp of the event */ - metadata?: components["schemas"]["MetadataField"] | null; + timestamp: number; /** * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * @description The ID of the install job */ - id: string; + id: number; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. 
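The download events above report cumulative `bytes` against `total_bytes` across all files of the model, so percent-complete is a plain ratio; guarding against zero covers installs whose total size is unreported. A sketch (assumed "./schema" import):

import type { components } from "./schema"; // assumed import path

type Progress = components["schemas"]["ModelInstallDownloadProgressEvent"];

// Whole-model progress as an integer percentage, safe when total_bytes is 0.
const percentDone = (e: Progress): number =>
  e.total_bytes > 0 ? Math.round((e.bytes / e.total_bytes) * 100) : 0;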
- * @default false + * Source + * @description Source of the model; local path, repo_id or url */ - is_intermediate?: boolean; + source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"]; + }; + /** + * ModelInstallErrorEvent + * @description Event model for model_install_error + */ + ModelInstallErrorEvent: { /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Timestamp + * @description The timestamp of the event */ - use_cache?: boolean; + timestamp: number; /** - * Label - * @description Label for this metadata item - * @default * CUSTOM LABEL * - * @enum {string} + * Id + * @description The ID of the install job */ - label?: "* CUSTOM LABEL *" | "cfg_scale" | "cfg_rescale_multiplier" | "guidance"; + id: number; /** - * Custom Label - * @description Label for this metadata item - * @default null + * Source + * @description Source of the model; local path, repo_id or url */ - custom_label?: string | null; + source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"]; /** - * Default Value - * @description The default float to use if not found in the metadata - * @default null + * Error Type + * @description The name of the exception */ - default_value?: number[] | null; + error_type: string; /** - * type - * @default metadata_to_float_collection - * @constant + * Error + * @description A text description of the exception */ - type: "metadata_to_float_collection"; + error: string; }; /** - * Metadata To Float - * @description Extracts a Float value of a label from metadata + * ModelInstallJob + * @description Object that tracks the current status of an install request. */ - MetadataToFloatInvocation: { + ModelInstallJob: { /** - * @description Optional metadata to be saved with the image - * @default null + * Id + * @description Unique ID for this job */ - metadata?: components["schemas"]["MetadataField"] | null; + id: number; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * @description Current status of install process + * @default waiting */ - id: string; + status?: components["schemas"]["InstallStatus"]; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. + * Error Reason + * @description Information about why the job failed + */ + error_reason?: string | null; + /** @description Configuration information (e.g. 'description') to apply to model. */ + config_in?: components["schemas"]["ModelRecordChanges"]; + /** + * Config Out + * @description After successful installation, this will hold the configuration object. 
+ */ + config_out?: (components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Main_ExternalAPI_ChatGPT4o_Config"] | components["schemas"]["Main_ExternalAPI_Gemini2_5_Config"] | 
components["schemas"]["Main_ExternalAPI_Imagen3_Config"] | components["schemas"]["Main_ExternalAPI_Imagen4_Config"] | components["schemas"]["Main_ExternalAPI_FluxKontext_Config"] | components["schemas"]["Video_ExternalAPI_Veo3_Config"] | components["schemas"]["Video_ExternalAPI_Runway_Config"] | components["schemas"]["Unknown_Config"]) | null; + /** + * Inplace + * @description Leave model in its current location; otherwise install under models directory * @default false */ - is_intermediate?: boolean; + inplace?: boolean; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Source + * @description Source (URL, repo_id, or local path) of model */ - use_cache?: boolean; + source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"]; /** - * Label - * @description Label for this metadata item - * @default * CUSTOM LABEL * - * @enum {string} + * Local Path + * Format: path + * @description Path to locally-downloaded model; may be the same as the source */ - label?: "* CUSTOM LABEL *" | "cfg_scale" | "cfg_rescale_multiplier" | "guidance"; + local_path: string; /** - * Custom Label - * @description Label for this metadata item - * @default null + * Bytes + * @description For a remote model, the number of bytes downloaded so far (may not be available) + * @default 0 */ - custom_label?: string | null; + bytes?: number; /** - * Default Value - * @description The default float to use if not found in the metadata - * @default null + * Total Bytes + * @description Total size of the model to be installed + * @default 0 */ - default_value?: number | null; + total_bytes?: number; /** - * type - * @default metadata_to_float - * @constant + * Source Metadata + * @description Metadata provided by the model source */ - type: "metadata_to_float"; - }; - /** - * Metadata To IP-Adapters - * @description Extracts a IP-Adapters value of a label from metadata - */ - MetadataToIPAdaptersInvocation: { + source_metadata?: (components["schemas"]["BaseMetadata"] | components["schemas"]["HuggingFaceMetadata"]) | null; /** - * @description Optional metadata to be saved with the image - * @default null + * Download Parts + * @description Download jobs contributing to this install */ - metadata?: components["schemas"]["MetadataField"] | null; + download_parts?: components["schemas"]["DownloadJob"][]; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Error + * @description On an error condition, this field will contain the text of the exception */ - id: string; + error?: string | null; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. 
- * @default false + * Error Traceback + * @description On an error condition, this field will contain the exception traceback */ - is_intermediate?: boolean; + error_traceback?: string | null; + }; + /** + * ModelInstallStartedEvent + * @description Event model for model_install_started + */ + ModelInstallStartedEvent: { /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Timestamp + * @description The timestamp of the event */ - use_cache?: boolean; + timestamp: number; /** - * IP-Adapter-List - * @description IP-Adapter to apply - * @default null + * Id + * @description The ID of the install job */ - ip_adapter_list?: components["schemas"]["IPAdapterField"] | components["schemas"]["IPAdapterField"][] | null; + id: number; /** - * type - * @default metadata_to_ip_adapters - * @constant + * Source + * @description Source of the model; local path, repo_id or url */ - type: "metadata_to_ip_adapters"; + source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"]; }; /** - * Metadata To Integer Collection - * @description Extracts an integer value Collection of a label from metadata + * ModelLoadCompleteEvent + * @description Event model for model_load_complete */ - MetadataToIntegerCollectionInvocation: { + ModelLoadCompleteEvent: { /** - * @description Optional metadata to be saved with the image - * @default null + * Timestamp + * @description The timestamp of the event */ - metadata?: components["schemas"]["MetadataField"] | null; + timestamp: number; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Config + * @description The model's config */ - id: string; + config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | 
components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Main_ExternalAPI_ChatGPT4o_Config"] | components["schemas"]["Main_ExternalAPI_Gemini2_5_Config"] | components["schemas"]["Main_ExternalAPI_Imagen3_Config"] | components["schemas"]["Main_ExternalAPI_Imagen4_Config"] | components["schemas"]["Main_ExternalAPI_FluxKontext_Config"] | components["schemas"]["Video_ExternalAPI_Veo3_Config"] | components["schemas"]["Video_ExternalAPI_Runway_Config"] | components["schemas"]["Unknown_Config"]; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. 
- * @default false + * @description The submodel type, if any + * @default null */ - is_intermediate?: boolean; + submodel_type: components["schemas"]["SubModelType"] | null; + }; + /** + * ModelLoadStartedEvent + * @description Event model for model_load_started + */ + ModelLoadStartedEvent: { /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Timestamp + * @description The timestamp of the event */ - use_cache?: boolean; + timestamp: number; /** - * Label - * @description Label for this metadata item - * @default * CUSTOM LABEL * - * @enum {string} + * Config + * @description The model's config */ - label?: "* CUSTOM LABEL *" | "width" | "height" | "seed" | "steps" | "clip_skip" | "cfg_scale_start_step" | "cfg_scale_end_step"; + config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] 
| components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Main_ExternalAPI_ChatGPT4o_Config"] | components["schemas"]["Main_ExternalAPI_Gemini2_5_Config"] | components["schemas"]["Main_ExternalAPI_Imagen3_Config"] | components["schemas"]["Main_ExternalAPI_Imagen4_Config"] | components["schemas"]["Main_ExternalAPI_FluxKontext_Config"] | components["schemas"]["Video_ExternalAPI_Veo3_Config"] | components["schemas"]["Video_ExternalAPI_Runway_Config"] | components["schemas"]["Unknown_Config"]; /** - * Custom Label - * @description Label for this metadata item + * @description The submodel type, if any * @default null */ - custom_label?: string | null; + submodel_type: components["schemas"]["SubModelType"] | null; + }; + /** + * ModelLoaderOutput + * @description Model loader output + */ + ModelLoaderOutput: { /** - * Default Value - * @description The default integer to use if not found in the metadata - * @default null + * VAE + * @description VAE */ - default_value?: number[] | null; + vae: components["schemas"]["VAEField"]; /** * type - * @default metadata_to_integer_collection + * @default model_loader_output * @constant */ - type: "metadata_to_integer_collection"; + type: "model_loader_output"; + /** + * CLIP + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + */ + clip: components["schemas"]["CLIPField"]; + /** + * UNet + * @description UNet (scheduler, LoRAs) + */ + unet: components["schemas"]["UNetField"]; }; /** - * Metadata To Integer - * @description Extracts an integer value of a label from metadata + * ModelRecordChanges + * @description A set of changes to apply to a model. */ - MetadataToIntegerInvocation: { + ModelRecordChanges: { /** - * @description Optional metadata to be saved with the image - * @default null + * Source + * @description original source of the model */ - metadata?: components["schemas"]["MetadataField"] | null; + source?: string | null; + /** @description type of model source */ + source_type?: components["schemas"]["ModelSourceType"] | null; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Source Api Response + * @description metadata from remote source */ - id: string; + source_api_response?: string | null; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Name + * @description Name of the model. */ - is_intermediate?: boolean; + name?: string | null; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Path + * @description Path to the model. 
*/ - use_cache?: boolean; + path?: string | null; /** - * Label - * @description Label for this metadata item - * @default * CUSTOM LABEL * - * @enum {string} + * Description + * @description Model description */ - label?: "* CUSTOM LABEL *" | "width" | "height" | "seed" | "steps" | "clip_skip" | "cfg_scale_start_step" | "cfg_scale_end_step"; + description?: string | null; + /** @description The base model. */ + base?: components["schemas"]["BaseModelType"] | null; + /** @description Type of model */ + type?: components["schemas"]["ModelType"] | null; /** - * Custom Label - * @description Label for this metadata item - * @default null + * Key + * @description Database ID for this model */ - custom_label?: string | null; + key?: string | null; /** - * Default Value - * @description The default integer to use if not found in the metadata - * @default null + * Hash + * @description hash of model file */ - default_value?: number | null; + hash?: string | null; /** - * type - * @default metadata_to_integer - * @constant + * File Size + * @description Size of model file */ - type: "metadata_to_integer"; - }; - /** - * Metadata To LoRA Collection - * @description Extracts Lora(s) from metadata into a collection - */ - MetadataToLorasCollectionInvocation: { + file_size?: number | null; /** - * @description Optional metadata to be saved with the image - * @default null + * Format + * @description format of model file */ - metadata?: components["schemas"]["MetadataField"] | null; + format?: string | null; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Trigger Phrases + * @description Set of trigger phrases for this model */ - id: string; + trigger_phrases?: string[] | null; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Default Settings + * @description Default settings for this model */ - is_intermediate?: boolean; + default_settings?: components["schemas"]["MainModelDefaultSettings"] | components["schemas"]["LoraModelDefaultSettings"] | components["schemas"]["ControlAdapterDefaultSettings"] | null; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Variant + * @description The variant of the model. */ - use_cache?: boolean; + variant?: components["schemas"]["ModelVariantType"] | components["schemas"]["ClipVariantType"] | components["schemas"]["FluxVariantType"] | null; + /** @description The prediction type of the model. */ + prediction_type?: components["schemas"]["SchedulerPredictionType"] | null; /** - * Custom Label - * @description Label for this metadata item - * @default loras + * Upcast Attention + * @description Whether to upcast attention. */ - custom_label?: string; + upcast_attention?: boolean | null; /** - * LoRAs - * @description LoRA models and weights. May be a single LoRA or collection. 
- * @default [] + * Config Path + * @description Path to config file for model */ - loras?: components["schemas"]["LoRAField"] | components["schemas"]["LoRAField"][] | null; + config_path?: string | null; + }; + /** ModelRelationshipBatchRequest */ + ModelRelationshipBatchRequest: { /** - * type - * @default metadata_to_lora_collection - * @constant + * Model Keys + * @description List of model keys to fetch related models for */ - type: "metadata_to_lora_collection"; + model_keys: string[]; }; - /** - * MetadataToLorasCollectionOutput - * @description Model loader output - */ - MetadataToLorasCollectionOutput: { + /** ModelRelationshipCreateRequest */ + ModelRelationshipCreateRequest: { /** - * LoRAs - * @description Collection of LoRA model and weights + * Model Key 1 + * @description The key of the first model in the relationship */ - lora: components["schemas"]["LoRAField"][]; + model_key_1: string; /** - * type - * @default metadata_to_lora_collection_output - * @constant + * Model Key 2 + * @description The key of the second model in the relationship */ - type: "metadata_to_lora_collection_output"; + model_key_2: string; + }; + /** + * ModelRepoVariant + * @description Various hugging face variants on the diffusers format. + * @enum {string} + */ + ModelRepoVariant: "" | "fp16" | "fp32" | "onnx" | "openvino" | "flax"; + /** + * ModelSourceType + * @description Model source type. + * @enum {string} + */ + ModelSourceType: "path" | "url" | "hf_repo_id"; + /** + * ModelType + * @description Model type. + * @enum {string} + */ + ModelType: "onnx" | "main" | "vae" | "lora" | "control_lora" | "controlnet" | "embedding" | "ip_adapter" | "clip_vision" | "clip_embed" | "t2i_adapter" | "t5_encoder" | "spandrel_image_to_image" | "siglip" | "flux_redux" | "llava_onevision" | "video" | "unknown"; + /** + * ModelVariantType + * @description Variant type. + * @enum {string} + */ + ModelVariantType: "normal" | "inpaint" | "depth"; + /** + * ModelsList + * @description Return list of configs. 
+ */ + ModelsList: { + /** Models */ + models: (components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Main_ExternalAPI_ChatGPT4o_Config"] | 
components["schemas"]["Main_ExternalAPI_Gemini2_5_Config"] | components["schemas"]["Main_ExternalAPI_Imagen3_Config"] | components["schemas"]["Main_ExternalAPI_Imagen4_Config"] | components["schemas"]["Main_ExternalAPI_FluxKontext_Config"] | components["schemas"]["Video_ExternalAPI_Veo3_Config"] | components["schemas"]["Video_ExternalAPI_Runway_Config"] | components["schemas"]["Unknown_Config"])[]; }; /** - * Metadata To LoRAs - * @description Extracts a Loras value of a label from metadata + * Multiply Integers + * @description Multiplies two numbers */ - MetadataToLorasInvocation: { - /** - * @description Optional metadata to be saved with the image - * @default null - */ - metadata?: components["schemas"]["MetadataField"] | null; + MultiplyInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -16404,34 +19776,47 @@ export type components = { */ use_cache?: boolean; /** - * UNet - * @description UNet (scheduler, LoRAs) - * @default null + * A + * @description The first number + * @default 0 */ - unet?: components["schemas"]["UNetField"] | null; + a?: number; /** - * CLIP - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null + * B + * @description The second number + * @default 0 */ - clip?: components["schemas"]["CLIPField"] | null; + b?: number; /** * type - * @default metadata_to_loras + * @default mul * @constant */ - type: "metadata_to_loras"; + type: "mul"; }; - /** - * Metadata To Model - * @description Extracts a Model value of a label from metadata - */ - MetadataToModelInvocation: { + /** NodeFieldValue */ + NodeFieldValue: { /** - * @description Optional metadata to be saved with the image - * @default null + * Node Path + * @description The node into which this batch data item will be substituted. */ - metadata?: components["schemas"]["MetadataField"] | null; + node_path: string; + /** + * Field Name + * @description The field into which this batch data item will be substituted. + */ + field_name: string; + /** + * Value + * @description The value to substitute into the node/field. + */ + value: string | number | components["schemas"]["ImageField"]; + }; + /** + * Create Latent Noise + * @description Generates latent noise. + */ + NoiseInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
@@ -16450,72 +19835,70 @@ export type components = { */ use_cache?: boolean; /** - * Label - * @description Label for this metadata item - * @default model - * @enum {string} + * Seed + * @description Seed for random number generation + * @default 0 */ - label?: "* CUSTOM LABEL *" | "model"; + seed?: number; /** - * Custom Label - * @description Label for this metadata item - * @default null + * Width + * @description Width of output (px) + * @default 512 */ - custom_label?: string | null; + width?: number; /** - * @description The default model to use if not found in the metadata - * @default null + * Height + * @description Height of output (px) + * @default 512 */ - default_value?: components["schemas"]["ModelIdentifierField"] | null; + height?: number; + /** + * Use Cpu + * @description Use CPU for noise generation (for reproducible results across platforms) + * @default true + */ + use_cpu?: boolean; /** * type - * @default metadata_to_model + * @default noise * @constant */ - type: "metadata_to_model"; + type: "noise"; }; /** - * MetadataToModelOutput - * @description String to main model output + * NoiseOutput + * @description Invocation noise output */ - MetadataToModelOutput: { - /** - * Model - * @description Main model (UNet, VAE, CLIP) to load - */ - model: components["schemas"]["ModelIdentifierField"]; - /** - * Name - * @description Model Name - */ - name: string; - /** - * UNet - * @description UNet (scheduler, LoRAs) - */ - unet: components["schemas"]["UNetField"]; + NoiseOutput: { + /** @description Noise tensor */ + noise: components["schemas"]["LatentsField"]; /** - * VAE - * @description VAE + * Width + * @description Width of output (px) */ - vae: components["schemas"]["VAEField"]; + width: number; /** - * CLIP - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * Height + * @description Height of output (px) */ - clip: components["schemas"]["CLIPField"]; + height: number; /** * type - * @default metadata_to_model_output + * @default noise_output * @constant */ - type: "metadata_to_model_output"; + type: "noise_output"; }; /** - * Metadata To SDXL LoRAs - * @description Extracts a SDXL Loras value of a label from metadata + * Normal Map + * @description Generates a normal map. 
*/ - MetadataToSDXLLorasInvocation: { + NormalMapInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; /** * @description Optional metadata to be saved with the image * @default null @@ -16539,188 +19922,139 @@ export type components = { */ use_cache?: boolean; /** - * UNet - * @description UNet (scheduler, LoRAs) - * @default null - */ - unet?: components["schemas"]["UNetField"] | null; - /** - * CLIP 1 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null - */ - clip?: components["schemas"]["CLIPField"] | null; - /** - * CLIP 2 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @description The image to process * @default null */ - clip2?: components["schemas"]["CLIPField"] | null; + image?: components["schemas"]["ImageField"] | null; /** * type - * @default metadata_to_sdlx_loras + * @default normal_map * @constant */ - type: "metadata_to_sdlx_loras"; + type: "normal_map"; }; - /** - * Metadata To SDXL Model - * @description Extracts a SDXL Model value of a label from metadata - */ - MetadataToSDXLModelInvocation: { + /** OffsetPaginatedResults[BoardDTO] */ + OffsetPaginatedResults_BoardDTO_: { /** - * @description Optional metadata to be saved with the image - * @default null + * Limit + * @description Limit of items to get */ - metadata?: components["schemas"]["MetadataField"] | null; + limit: number; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Offset + * @description Offset from which to retrieve items */ - id: string; + offset: number; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. 
- * @default false + * Total + * @description Total number of items in result */ - is_intermediate?: boolean; + total: number; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Items + * @description Items */ - use_cache?: boolean; + items: components["schemas"]["BoardDTO"][]; + }; + /** OffsetPaginatedResults[ImageDTO] */ + OffsetPaginatedResults_ImageDTO_: { /** - * Label - * @description Label for this metadata item - * @default model - * @enum {string} + * Limit + * @description Limit of items to get */ - label?: "* CUSTOM LABEL *" | "model"; + limit: number; /** - * Custom Label - * @description Label for this metadata item - * @default null + * Offset + * @description Offset from which to retrieve items */ - custom_label?: string | null; + offset: number; /** - * @description The default SDXL Model to use if not found in the metadata - * @default null + * Total + * @description Total number of items in result */ - default_value?: components["schemas"]["ModelIdentifierField"] | null; + total: number; /** - * type - * @default metadata_to_sdxl_model - * @constant + * Items + * @description Items */ - type: "metadata_to_sdxl_model"; + items: components["schemas"]["ImageDTO"][]; }; - /** - * MetadataToSDXLModelOutput - * @description String to SDXL main model output - */ - MetadataToSDXLModelOutput: { - /** - * Model - * @description Main model (UNet, VAE, CLIP) to load - */ - model: components["schemas"]["ModelIdentifierField"]; - /** - * Name - * @description Model Name - */ - name: string; - /** - * UNet - * @description UNet (scheduler, LoRAs) - */ - unet: components["schemas"]["UNetField"]; + /** OffsetPaginatedResults[VideoDTO] */ + OffsetPaginatedResults_VideoDTO_: { /** - * CLIP 1 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * Limit + * @description Limit of items to get */ - clip: components["schemas"]["CLIPField"]; + limit: number; /** - * CLIP 2 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * Offset + * @description Offset from which to retrieve items */ - clip2: components["schemas"]["CLIPField"]; + offset: number; /** - * VAE - * @description VAE + * Total + * @description Total number of items in result */ - vae: components["schemas"]["VAEField"]; + total: number; /** - * type - * @default metadata_to_sdxl_model_output - * @constant + * Items + * @description Items */ - type: "metadata_to_sdxl_model_output"; + items: components["schemas"]["VideoDTO"][]; }; /** - * Metadata To Scheduler - * @description Extracts a Scheduler value of a label from metadata + * OutputFieldJSONSchemaExtra + * @description Extra attributes to be added to input fields and their OpenAPI schema. Used by the workflow editor + * during schema parsing and UI rendering. */ - MetadataToSchedulerInvocation: { - /** - * @description Optional metadata to be saved with the image - * @default null - */ - metadata?: components["schemas"]["MetadataField"] | null; + OutputFieldJSONSchemaExtra: { + field_kind: components["schemas"]["FieldKind"]; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Ui Hidden + * @default false */ - id: string; + ui_hidden: boolean; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. 
- * @default false + * Ui Order + * @default null */ - is_intermediate?: boolean; + ui_order: number | null; + /** @default null */ + ui_type: components["schemas"]["UIType"] | null; + }; + /** PaginatedResults[WorkflowRecordListItemWithThumbnailDTO] */ + PaginatedResults_WorkflowRecordListItemWithThumbnailDTO_: { /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Page + * @description Current Page */ - use_cache?: boolean; + page: number; /** - * Label - * @description Label for this metadata item - * @default scheduler - * @enum {string} + * Pages + * @description Total number of pages */ - label?: "* CUSTOM LABEL *" | "scheduler"; + pages: number; /** - * Custom Label - * @description Label for this metadata item - * @default null + * Per Page + * @description Number of items per page */ - custom_label?: string | null; + per_page: number; /** - * Default Value - * @description The default scheduler to use if not found in the metadata - * @default euler - * @enum {string} + * Total + * @description Total number of items in result */ - default_value?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_3m" | "dpmpp_3m_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd"; + total: number; /** - * type - * @default metadata_to_scheduler - * @constant + * Items + * @description Items */ - type: "metadata_to_scheduler"; + items: components["schemas"]["WorkflowRecordListItemWithThumbnailDTO"][]; }; /** - * Metadata To String Collection - * @description Extracts a string collection value of a label from metadata + * Pair Tile with Image + * @description Pair an image with its tile properties. */ - MetadataToStringCollectionInvocation: { - /** - * @description Optional metadata to be saved with the image - * @default null - */ - metadata?: components["schemas"]["MetadataField"] | null; + PairTileImageInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -16739,36 +20073,45 @@ export type components = { */ use_cache?: boolean; /** - * Label - * @description Label for this metadata item - * @default * CUSTOM LABEL * - * @enum {string} + * @description The tile image. + * @default null */ - label?: "* CUSTOM LABEL *" | "positive_prompt" | "positive_style_prompt" | "negative_prompt" | "negative_style_prompt"; + image?: components["schemas"]["ImageField"] | null; /** - * Custom Label - * @description Label for this metadata item + * @description The tile properties. * @default null */ - custom_label?: string | null; + tile?: components["schemas"]["Tile"] | null; /** - * Default Value - * @description The default string collection to use if not found in the metadata - * @default null + * type + * @default pair_tile_image + * @constant */ - default_value?: string[] | null; + type: "pair_tile_image"; + }; + /** PairTileImageOutput */ + PairTileImageOutput: { + /** @description A tile description with its corresponding image. 
*/ + tile_with_image: components["schemas"]["TileWithImage"]; /** * type - * @default metadata_to_string_collection + * @default pair_tile_image_output * @constant */ - type: "metadata_to_string_collection"; + type: "pair_tile_image_output"; }; /** - * Metadata To String - * @description Extracts a string value of a label from metadata + * Paste Image into Bounding Box + * @description Paste the source image into the target image at the given bounding box. + * + * The source image must be the same size as the bounding box, and the bounding box must fit within the target image. */ - MetadataToStringInvocation: { + PasteImageIntoBoundingBoxInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; /** * @description Optional metadata to be saved with the image * @default null @@ -16792,36 +20135,37 @@ export type components = { */ use_cache?: boolean; /** - * Label - * @description Label for this metadata item - * @default * CUSTOM LABEL * - * @enum {string} + * @description The image to paste + * @default null */ - label?: "* CUSTOM LABEL *" | "positive_prompt" | "positive_style_prompt" | "negative_prompt" | "negative_style_prompt"; + source_image?: components["schemas"]["ImageField"] | null; /** - * Custom Label - * @description Label for this metadata item + * @description The image to paste into * @default null */ - custom_label?: string | null; + target_image?: components["schemas"]["ImageField"] | null; /** - * Default Value - * @description The default string to use if not found in the metadata + * @description The bounding box to paste the image into * @default null */ - default_value?: string | null; + bounding_box?: components["schemas"]["BoundingBoxField"] | null; /** * type - * @default metadata_to_string + * @default paste_image_into_bounding_box * @constant */ - type: "metadata_to_string"; + type: "paste_image_into_bounding_box"; }; /** - * Metadata To T2I-Adapters - * @description Extracts a T2I-Adapters value of a label from metadata + * PiDiNet Edge Detection + * @description Generates an edge map using PiDiNet. */ - MetadataToT2IAdaptersInvocation: { + PiDiNetEdgeDetectionInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; /** * @description Optional metadata to be saved with the image * @default null @@ -16845,110 +20189,73 @@ export type components = { */ use_cache?: boolean; /** - * T2I-Adapter - * @description IP-Adapter to apply - * @default null - */ - t2i_adapter_list?: components["schemas"]["T2IAdapterField"] | components["schemas"]["T2IAdapterField"][] | null; - /** - * type - * @default metadata_to_t2i_adapters - * @constant - */ - type: "metadata_to_t2i_adapters"; - }; - /** - * Metadata To VAE - * @description Extracts a VAE value of a label from metadata - */ - MetadataToVAEInvocation: { - /** - * @description Optional metadata to be saved with the image + * @description The image to process * @default null */ - metadata?: components["schemas"]["MetadataField"] | null; - /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. - */ - id: string; + image?: components["schemas"]["ImageField"] | null; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. 
+ * Quantize Edges + * @description Whether or not to quantize the edge map (safe mode) * @default false */ - is_intermediate?: boolean; - /** - * Use Cache - * @description Whether or not to use the cache - * @default true - */ - use_cache?: boolean; + quantize_edges?: boolean; /** - * Label - * @description Label for this metadata item - * @default vae - * @enum {string} + * Scribble + * @description Whether or not to use scribble mode + * @default false */ - label?: "* CUSTOM LABEL *" | "vae"; + scribble?: boolean; /** - * Custom Label - * @description Label for this metadata item - * @default null + * type + * @default pidi_edge_detection + * @constant */ - custom_label?: string | null; + type: "pidi_edge_detection"; + }; + /** PresetData */ + PresetData: { /** - * @description The default VAE to use if not found in the metadata - * @default null + * Positive Prompt + * @description Positive prompt */ - default_value?: components["schemas"]["VAEField"] | null; + positive_prompt: string; /** - * type - * @default metadata_to_vae - * @constant + * Negative Prompt + * @description Negative prompt */ - type: "metadata_to_vae"; + negative_prompt: string; }; /** - * ModelFormat - * @description Storage format of model. + * PresetType * @enum {string} */ - ModelFormat: "omi" | "diffusers" | "checkpoint" | "lycoris" | "onnx" | "olive" | "embedding_file" | "embedding_folder" | "invokeai" | "t5_encoder" | "bnb_quantized_int8b" | "bnb_quantized_nf4b" | "gguf_quantized" | "api" | "unknown"; - /** ModelIdentifierField */ - ModelIdentifierField: { - /** - * Key - * @description The model's unique key - */ - key: string; + PresetType: "user" | "default" | "project"; + /** + * ProgressImage + * @description The progress image sent intermittently during processing + */ + ProgressImage: { /** - * Hash - * @description The model's BLAKE3 hash + * Width + * @description The effective width of the image in pixels */ - hash: string; + width: number; /** - * Name - * @description The model's name + * Height + * @description The effective height of the image in pixels */ - name: string; - /** @description The model's base model type */ - base: components["schemas"]["BaseModelType"]; - /** @description The model's type */ - type: components["schemas"]["ModelType"]; + height: number; /** - * @description The submodel to load, if this is a main model - * @default null + * Dataurl + * @description The image data as a b64 data URL */ - submodel_type?: components["schemas"]["SubModelType"] | null; + dataURL: string; }; /** - * Any Model - * @description Selects any model, outputting it its identifier. Be careful with this one! The identifier will be accepted as - * input for any model, even if the model types don't match. If you connect this to a mismatched input, you'll get an - * error. + * Prompts from File + * @description Loads prompts from a text file */ - ModelIdentifierInvocation: { + PromptsFromFileInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
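The `ProgressImage` schema above is the payload streamed for in-flight generations: `width` and `height` give the effective pixel dimensions and `dataURL` is a base64 data URL, so a client can paint it with no decoding step. A minimal browser-side sketch (illustrative only, not part of this diff; it assumes the generated types are importable from "./schema"):

import type { components } from "./schema"; // assumed path to this generated file

type ProgressImage = components["schemas"]["ProgressImage"];

// Resize the canvas to the reported dimensions, then draw the intermediate image.
function drawProgress(progress: ProgressImage, canvas: HTMLCanvasElement): void {
  canvas.width = progress.width;
  canvas.height = progress.height;
  const img = new Image();
  img.onload = () => canvas.getContext("2d")?.drawImage(img, 0, 0);
  img.src = progress.dataURL; // b64 data URL per the schema
}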
@@ -16967,539 +20274,636 @@ export type components = { */ use_cache?: boolean; /** - * Model - * @description The model to select + * File Path + * @description Path to prompt text file * @default null */ - model?: components["schemas"]["ModelIdentifierField"] | null; + file_path?: string | null; + /** + * Pre Prompt + * @description String to prepend to each prompt + * @default null + */ + pre_prompt?: string | null; + /** + * Post Prompt + * @description String to append to each prompt + * @default null + */ + post_prompt?: string | null; + /** + * Start Line + * @description Line in the file to start from + * @default 1 + */ + start_line?: number; + /** + * Max Prompts + * @description Max lines to read from file (0=all) + * @default 1 + */ + max_prompts?: number; /** * type - * @default model_identifier + * @default prompt_from_file * @constant */ - type: "model_identifier"; + type: "prompt_from_file"; }; /** - * ModelIdentifierOutput - * @description Model identifier output + * PruneResult + * @description Result of pruning the session queue */ - ModelIdentifierOutput: { - /** - * Model - * @description Model identifier - */ - model: components["schemas"]["ModelIdentifierField"]; + PruneResult: { /** - * type - * @default model_identifier_output - * @constant + * Deleted + * @description Number of queue items deleted */ - type: "model_identifier_output"; + deleted: number; }; /** - * ModelInstallCancelledEvent - * @description Event model for model_install_cancelled + * QueueClearedEvent + * @description Event model for queue_cleared */ - ModelInstallCancelledEvent: { + QueueClearedEvent: { /** * Timestamp * @description The timestamp of the event */ timestamp: number; /** - * Id - * @description The ID of the install job - */ - id: number; - /** - * Source - * @description Source of the model; local path, repo_id or url + * Queue Id + * @description The ID of the queue */ - source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"]; + queue_id: string; }; /** - * ModelInstallCompleteEvent - * @description Event model for model_install_complete + * QueueItemStatusChangedEvent + * @description Event model for queue_item_status_changed */ - ModelInstallCompleteEvent: { + QueueItemStatusChangedEvent: { /** * Timestamp * @description The timestamp of the event */ timestamp: number; /** - * Id - * @description The ID of the install job + * Queue Id + * @description The ID of the queue */ - id: number; + queue_id: string; /** - * Source - * @description Source of the model; local path, repo_id or url + * Item Id + * @description The ID of the queue item */ - source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"]; + item_id: number; /** - * Key - * @description Model config record key + * Batch Id + * @description The ID of the queue batch */ - key: string; + batch_id: string; /** - * Total Bytes - * @description Size of the model (may be None for installation of a local path) + * Origin + * @description The origin of the queue item + * @default null */ - total_bytes: number | null; + origin: string | null; /** - * Config - * @description The installed model's config + * Destination + * @description The destination of the queue item + * @default null + */ + destination: string | null; + /** + * Status + * @description The new status of the queue item + * @enum {string} + */ + status: "pending" | "in_progress" | "completed" | "failed" | 
"canceled"; + /** + * Error Type + * @description The error type, if any + * @default null + */ + error_type: string | null; + /** + * Error Message + * @description The error message, if any + * @default null + */ + error_message: string | null; + /** + * Error Traceback + * @description The error traceback, if any + * @default null + */ + error_traceback: string | null; + /** + * Created At + * @description The timestamp when the queue item was created + */ + created_at: string; + /** + * Updated At + * @description The timestamp when the queue item was last updated + */ + updated_at: string; + /** + * Started At + * @description The timestamp when the queue item was started + * @default null + */ + started_at: string | null; + /** + * Completed At + * @description The timestamp when the queue item was completed + * @default null + */ + completed_at: string | null; + /** @description The status of the batch */ + batch_status: components["schemas"]["BatchStatus"]; + /** @description The status of the queue */ + queue_status: components["schemas"]["SessionQueueStatus"]; + /** + * Session Id + * @description The ID of the session (aka graph execution state) + */ + session_id: string; + /** + * Credits + * @description The total credits used for this queue item + * @default null */ - config: components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"] | components["schemas"]["UnknownModelConfig"]; + credits: number | null; }; /** - * ModelInstallDownloadProgressEvent - * @description Event model for model_install_download_progress + * QueueItemsRetriedEvent + * @description Event model for queue_items_retried */ - ModelInstallDownloadProgressEvent: { + QueueItemsRetriedEvent: { /** * Timestamp * @description The timestamp of the event */ timestamp: number; + /** + * Queue Id + * @description The ID of the queue + */ + queue_id: string; + /** + * Retried Item Ids + * @description The IDs of the queue items that were retried + */ + retried_item_ids: number[]; + }; + /** + * Random Float + * @description Outputs a single random float + */ + RandomFloatInvocation: { /** * Id - * @description The ID of the install job + * @description The id 
of this instance of an invocation. Must be unique among all instances of invocations. */ - id: number; + id: string; /** - * Source - * @description Source of the model; local path, repo_id or url + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"]; + is_intermediate?: boolean; /** - * Local Path - * @description Where model is downloading to + * Use Cache + * @description Whether or not to use the cache + * @default false */ - local_path: string; + use_cache?: boolean; + /** + * Low + * @description The inclusive low value + * @default 0 + */ + low?: number; /** - * Bytes - * @description Number of bytes downloaded so far + * High + * @description The exclusive high value + * @default 1 */ - bytes: number; + high?: number; /** - * Total Bytes - * @description Total size of download, including all files + * Decimals + * @description The number of decimal places to round to + * @default 2 */ - total_bytes: number; + decimals?: number; /** - * Parts - * @description Progress of downloading URLs that comprise the model, if any + * type + * @default rand_float + * @constant */ - parts: { - [key: string]: number | string; - }[]; + type: "rand_float"; }; /** - * ModelInstallDownloadStartedEvent - * @description Event model for model_install_download_started + * Random Integer + * @description Outputs a single random integer. */ - ModelInstallDownloadStartedEvent: { - /** - * Timestamp - * @description The timestamp of the event - */ - timestamp: number; + RandomIntInvocation: { /** * Id - * @description The ID of the install job + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - id: number; + id: string; /** - * Source - * @description Source of the model; local path, repo_id or url + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"]; + is_intermediate?: boolean; /** - * Local Path - * @description Where model is downloading to + * Use Cache + * @description Whether or not to use the cache + * @default false */ - local_path: string; + use_cache?: boolean; /** - * Bytes - * @description Number of bytes downloaded so far + * Low + * @description The inclusive low value + * @default 0 */ - bytes: number; + low?: number; /** - * Total Bytes - * @description Total size of download, including all files + * High + * @description The exclusive high value + * @default 2147483647 */ - total_bytes: number; + high?: number; /** - * Parts - * @description Progress of downloading URLs that comprise the model, if any + * type + * @default rand_int + * @constant */ - parts: { - [key: string]: number | string; - }[]; + type: "rand_int"; }; /** - * ModelInstallDownloadsCompleteEvent - * @description Emitted once when an install job becomes active. + * Random Range + * @description Creates a collection of random numbers */ - ModelInstallDownloadsCompleteEvent: { + RandomRangeInvocation: { /** - * Timestamp - * @description The timestamp of the event + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
*/ - timestamp: number; + id: string; /** - * Id - * @description The ID of the install job + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - id: number; + is_intermediate?: boolean; /** - * Source - * @description Source of the model; local path, repo_id or url + * Use Cache + * @description Whether or not to use the cache + * @default false */ - source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"]; - }; - /** - * ModelInstallErrorEvent - * @description Event model for model_install_error - */ - ModelInstallErrorEvent: { + use_cache?: boolean; /** - * Timestamp - * @description The timestamp of the event + * Low + * @description The inclusive low value + * @default 0 */ - timestamp: number; + low?: number; /** - * Id - * @description The ID of the install job + * High + * @description The exclusive high value + * @default 2147483647 */ - id: number; + high?: number; /** - * Source - * @description Source of the model; local path, repo_id or url + * Size + * @description The number of values to generate + * @default 1 */ - source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"]; + size?: number; /** - * Error Type - * @description The name of the exception + * Seed + * @description The seed for the RNG (omit for random) + * @default 0 */ - error_type: string; + seed?: number; /** - * Error - * @description A text description of the exception + * type + * @default random_range + * @constant */ - error: string; + type: "random_range"; }; /** - * ModelInstallJob - * @description Object that tracks the current status of an install request. + * Integer Range + * @description Creates a range of numbers from start to stop with step */ - ModelInstallJob: { + RangeInvocation: { /** * Id - * @description Unique ID for this job + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - id: number; + id: string; /** - * @description Current status of install process - * @default waiting + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - status?: components["schemas"]["InstallStatus"]; + is_intermediate?: boolean; /** - * Error Reason - * @description Information about why the job failed + * Use Cache + * @description Whether or not to use the cache + * @default true */ - error_reason?: string | null; - /** @description Configuration information (e.g. 'description') to apply to model. */ - config_in?: components["schemas"]["ModelRecordChanges"]; + use_cache?: boolean; /** - * Config Out - * @description After successful installation, this will hold the configuration object. 
+ * Start + * @description The start of the range + * @default 0 */ - config_out?: (components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"] | components["schemas"]["UnknownModelConfig"]) | null; + start?: number; /** - * Inplace - * @description Leave model in its current location; otherwise install under models directory - * @default false + * Stop + * @description The stop of the range + * @default 10 */ - inplace?: boolean; + stop?: number; /** - * Source - * @description Source (URL, repo_id, or local path) of model + * Step + * @description The step of the range + * @default 1 */ - source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"]; + step?: number; /** - * Local Path - * Format: path - * @description Path to locally-downloaded model; may be the same as the source + * type + * @default range + * @constant */ - local_path: string; + type: "range"; + }; + /** + * Integer Range of Size + * @description Creates a range from start to start + (size * step) incremented by step + */ + RangeOfSizeInvocation: { /** - * Bytes - * @description For a remote model, the number of bytes downloaded so far (may not be available) - * @default 0 + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - bytes?: number; + id: string; /** - * Total Bytes - * @description Total size of the model to be installed - * @default 0 + * Is Intermediate + * @description Whether or not this is an intermediate invocation. 
+ * @default false */ - total_bytes?: number; + is_intermediate?: boolean; /** - * Source Metadata - * @description Metadata provided by the model source + * Use Cache + * @description Whether or not to use the cache + * @default true */ - source_metadata?: (components["schemas"]["BaseMetadata"] | components["schemas"]["HuggingFaceMetadata"]) | null; + use_cache?: boolean; /** - * Download Parts - * @description Download jobs contributing to this install + * Start + * @description The start of the range + * @default 0 */ - download_parts?: components["schemas"]["DownloadJob"][]; + start?: number; /** - * Error - * @description On an error condition, this field will contain the text of the exception + * Size + * @description The number of values + * @default 1 */ - error?: string | null; + size?: number; /** - * Error Traceback - * @description On an error condition, this field will contain the exception traceback + * Step + * @description The step of the range + * @default 1 */ - error_traceback?: string | null; + step?: number; + /** + * type + * @default range_of_size + * @constant + */ + type: "range_of_size"; }; /** - * ModelInstallStartedEvent - * @description Event model for model_install_started + * Create Rectangle Mask + * @description Create a rectangular mask. */ - ModelInstallStartedEvent: { + RectangleMaskInvocation: { /** - * Timestamp - * @description The timestamp of the event + * @description Optional metadata to be saved with the image + * @default null */ - timestamp: number; + metadata?: components["schemas"]["MetadataField"] | null; /** * Id - * @description The ID of the install job + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - id: number; + id: string; /** - * Source - * @description Source of the model; local path, repo_id or url + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - source: components["schemas"]["LocalModelSource"] | components["schemas"]["HFModelSource"] | components["schemas"]["URLModelSource"]; - }; - /** - * ModelLoadCompleteEvent - * @description Event model for model_load_complete - */ - ModelLoadCompleteEvent: { + is_intermediate?: boolean; /** - * Timestamp - * @description The timestamp of the event + * Use Cache + * @description Whether or not to use the cache + * @default true */ - timestamp: number; + use_cache?: boolean; /** - * Config - * @description The model's config + * Width + * @description The width of the entire mask. 
+ * @default null */ - config: components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"] | components["schemas"]["UnknownModelConfig"]; + width?: number | null; /** - * @description The submodel type, if any + * Height + * @description The height of the entire mask. * @default null */ - submodel_type: components["schemas"]["SubModelType"] | null; - }; - /** - * ModelLoadStartedEvent - * @description Event model for model_load_started - */ - ModelLoadStartedEvent: { + height?: number | null; /** - * Timestamp - * @description The timestamp of the event + * X Left + * @description The left x-coordinate of the rectangular masked region (inclusive). + * @default null */ - timestamp: number; + x_left?: number | null; /** - * Config - * @description The model's config + * Y Top + * @description The top y-coordinate of the rectangular masked region (inclusive). 
+ * @default null */ - config: components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"] | components["schemas"]["UnknownModelConfig"]; + y_top?: number | null; /** - * @description The submodel type, if any + * Rectangle Width + * @description The width of the rectangular masked region. * @default null */ - submodel_type: components["schemas"]["SubModelType"] | null; - }; - /** - * ModelLoaderOutput - * @description Model loader output - */ - ModelLoaderOutput: { + rectangle_width?: number | null; /** - * VAE - * @description VAE + * Rectangle Height + * @description The height of the rectangular masked region. + * @default null */ - vae: components["schemas"]["VAEField"]; + rectangle_height?: number | null; /** * type - * @default model_loader_output + * @default rectangle_mask * @constant */ - type: "model_loader_output"; - /** - * CLIP - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - */ - clip: components["schemas"]["CLIPField"]; - /** - * UNet - * @description UNet (scheduler, LoRAs) - */ - unet: components["schemas"]["UNetField"]; + type: "rectangle_mask"; }; /** - * ModelRecordChanges - * @description A set of changes to apply to a model. + * RemoteModelFile + * @description Information about a downloadable file that forms part of a model. */ - ModelRecordChanges: { - /** - * Source - * @description original source of the model - */ - source?: string | null; - /** @description type of model source */ - source_type?: components["schemas"]["ModelSourceType"] | null; + RemoteModelFile: { /** - * Source Api Response - * @description metadata from remote source + * Url + * Format: uri + * @description The url to download this model file */ - source_api_response?: string | null; + url: string; /** - * Name - * @description Name of the model. + * Path + * Format: path + * @description The path to the file, relative to the model root */ - name?: string | null; + path: string; /** - * Path - * @description Path to the model. 
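The rectangle mask fields above compose as an overall mask size plus an offset rectangle; a hedged example payload built directly from the generated types (the import path and id value are illustrative):

    import type { components } from "./schema";

    // A 512x512 masked region at (256, 256) inside a 1024x1024 mask tensor.
    const mask: components["schemas"]["RectangleMaskInvocation"] = {
      id: "rectangle_mask_1",
      type: "rectangle_mask",
      width: 1024,
      height: 1024,
      x_left: 256,
      y_top: 256,
      rectangle_width: 512,
      rectangle_height: 512,
    };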
+ * Size + * @description The size of this file, in bytes + * @default 0 */ - path?: string | null; + size?: number | null; /** - * Description - * @description Model description + * Sha256 + * @description SHA256 hash of this model (not always available) */ - description?: string | null; - /** @description The base model. */ - base?: components["schemas"]["BaseModelType"] | null; - /** @description Type of model */ - type?: components["schemas"]["ModelType"] | null; + sha256?: string | null; + }; + /** RemoveImagesFromBoardResult */ + RemoveImagesFromBoardResult: { /** - * Key - * @description Database ID for this model + * Affected Boards + * @description The ids of boards affected by the delete operation */ - key?: string | null; + affected_boards: string[]; /** - * Hash - * @description hash of model file + * Removed Images + * @description The image names that were removed from their board */ - hash?: string | null; + removed_images: string[]; + }; + /** RemoveVideosFromBoardResult */ + RemoveVideosFromBoardResult: { /** - * File Size - * @description Size of model file + * Affected Boards + * @description The ids of boards affected by the delete operation */ - file_size?: number | null; + affected_boards: string[]; /** - * Format - * @description format of model file + * Removed Videos + * @description The video ids that were removed from their board */ - format?: string | null; + removed_videos: string[]; + }; + /** + * Resize Latents + * @description Resizes latents to explicit width/height (in pixels). Provided dimensions are floor-divided by 8. + */ + ResizeLatentsInvocation: { /** - * Trigger Phrases - * @description Set of trigger phrases for this model + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - trigger_phrases?: string[] | null; + id: string; /** - * Default Settings - * @description Default settings for this model + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - default_settings?: components["schemas"]["MainModelDefaultSettings"] | components["schemas"]["LoraModelDefaultSettings"] | components["schemas"]["ControlAdapterDefaultSettings"] | null; + is_intermediate?: boolean; /** - * Variant - * @description The variant of the model. + * Use Cache + * @description Whether or not to use the cache + * @default true */ - variant?: components["schemas"]["ModelVariantType"] | components["schemas"]["ClipVariantType"] | null; - /** @description The prediction type of the model. */ - prediction_type?: components["schemas"]["SchedulerPredictionType"] | null; + use_cache?: boolean; /** - * Upcast Attention - * @description Whether to upcast attention. 
+ * @description Latents tensor + * @default null */ - upcast_attention?: boolean | null; + latents?: components["schemas"]["LatentsField"] | null; /** - * Config Path - * @description Path to config file for model + * Width + * @description Width of output (px) + * @default null */ - config_path?: string | null; - }; - /** ModelRelationshipBatchRequest */ - ModelRelationshipBatchRequest: { + width?: number | null; /** - * Model Keys - * @description List of model keys to fetch related models for + * Height + * @description Height of output (px) + * @default null */ - model_keys: string[]; - }; - /** ModelRelationshipCreateRequest */ - ModelRelationshipCreateRequest: { + height?: number | null; /** - * Model Key 1 - * @description The key of the first model in the relationship + * Mode + * @description Interpolation mode + * @default bilinear + * @enum {string} */ - model_key_1: string; + mode?: "nearest" | "linear" | "bilinear" | "bicubic" | "trilinear" | "area" | "nearest-exact"; /** - * Model Key 2 - * @description The key of the second model in the relationship + * Antialias + * @description Whether or not to apply antialiasing (bilinear or bicubic only) + * @default false */ - model_key_2: string; - }; - /** - * ModelRepoVariant - * @description Various hugging face variants on the diffusers format. - * @enum {string} - */ - ModelRepoVariant: "" | "fp16" | "fp32" | "onnx" | "openvino" | "flax"; - /** - * ModelSourceType - * @description Model source type. - * @enum {string} - */ - ModelSourceType: "path" | "url" | "hf_repo_id"; - /** - * ModelType - * @description Model type. - * @enum {string} - */ - ModelType: "onnx" | "main" | "vae" | "lora" | "control_lora" | "controlnet" | "embedding" | "ip_adapter" | "clip_vision" | "clip_embed" | "t2i_adapter" | "t5_encoder" | "spandrel_image_to_image" | "siglip" | "flux_redux" | "llava_onevision" | "video" | "unknown"; + antialias?: boolean; + /** + * type + * @default lresize + * @constant + */ + type: "lresize"; + }; /** - * ModelVariantType - * @description Variant type. + * ResourceOrigin + * @description The origin of a resource (eg image). + * + * - INTERNAL: The resource was created by the application. + * - EXTERNAL: The resource was not created by the application. + * This may be a user-initiated upload, or an internal application upload (eg Canvas init image). * @enum {string} */ - ModelVariantType: "normal" | "inpaint" | "depth"; - /** - * ModelsList - * @description Return list of configs.
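Since `lresize` floor-divides the provided pixel dimensions by 8 (the usual SD latent scale factor), a requested dimension that is not a multiple of 8 is effectively rounded down; a one-line sketch:

    // e.g. width: 1000 -> latent width 125, i.e. an effective 1000px rounded down to 8 * 125.
    const toLatentDim = (px: number): number => Math.floor(px / 8);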
- */ - ModelsList: { - /** Models */ - models: (components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"] | components["schemas"]["UnknownModelConfig"])[]; + ResourceOrigin: "internal" | "external"; + /** RetryItemsResult */ + RetryItemsResult: { + /** + * Queue Id + * @description The ID of the queue + */ + queue_id: string; + /** + * Retried Item Ids + * @description The IDs of the queue items that were retried + */ + retried_item_ids: number[]; }; /** - * Multiply Integers - * @description Multiplies two numbers + * Round Float + * @description Rounds a float to a specified number of decimal places. */ - MultiplyInvocation: { + RoundInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -17518,47 +20922,92 @@ export type components = { */ use_cache?: boolean; /** - * A - * @description The first number + * Value + * @description The float value * @default 0 */ - a?: number; + value?: number; /** - * B - * @description The second number + * Decimals + * @description The number of decimal places * @default 0 */ - b?: number; + decimals?: number; /** * type - * @default mul + * @default round_float * @constant */ - type: "mul"; + type: "round_float"; }; - /** NodeFieldValue */ - NodeFieldValue: { + /** SAMPoint */ + SAMPoint: { /** - * Node Path - * @description The node into which this batch data item will be substituted. + * X + * @description The x-coordinate of the point */ - node_path: string; + x: number; /** - * Field Name - * @description The field into which this batch data item will be substituted. + * Y + * @description The y-coordinate of the point */ - field_name: string; + y: number; + /** @description The label of the point */ + label: components["schemas"]["SAMPointLabel"]; + }; + /** + * SAMPointLabel + * @enum {integer} + */ + SAMPointLabel: -1 | 0 | 1; + /** SAMPointsField */ + SAMPointsField: { /** - * Value - * @description The value to substitute into the node/field. 
+ * Points + * @description The points of the object */ - value: string | number | components["schemas"]["ImageField"]; + points: components["schemas"]["SAMPoint"][]; }; /** - * Create Latent Noise - * @description Generates latent noise. + * SD3ConditioningField + * @description A conditioning tensor primitive value */ - NoiseInvocation: { + SD3ConditioningField: { + /** + * Conditioning Name + * @description The name of conditioning tensor + */ + conditioning_name: string; + }; + /** + * SD3ConditioningOutput + * @description Base class for nodes that output a single SD3 conditioning tensor + */ + SD3ConditioningOutput: { + /** @description Conditioning tensor */ + conditioning: components["schemas"]["SD3ConditioningField"]; + /** + * type + * @default sd3_conditioning_output + * @constant + */ + type: "sd3_conditioning_output"; + }; + /** + * Denoise - SD3 + * @description Run denoising process with an SD3 model. + */ + SD3DenoiseInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -17577,65 +21026,134 @@ */ use_cache?: boolean; /** - * Seed - * @description Seed for random number generation + * @description Latents tensor + * @default null + */ + latents?: components["schemas"]["LatentsField"] | null; + /** + * @description A mask of the region to apply the denoising process to. Values of 0.0 represent the regions to be fully denoised, and 1.0 represent the regions to be preserved. + * @default null + */ + denoise_mask?: components["schemas"]["DenoiseMaskField"] | null; + /** + * Denoising Start + * @description When to start denoising, expressed as a percentage of total steps * @default 0 */ - seed?: number; + denoising_start?: number; + /** + * Denoising End + * @description When to stop denoising, expressed as a percentage of total steps + * @default 1 + */ + denoising_end?: number; + /** + * Transformer + * @description SD3 model (MMDiTX) to load + * @default null + */ + transformer?: components["schemas"]["TransformerField"] | null; + /** + * @description Positive conditioning tensor + * @default null + */ + positive_conditioning?: components["schemas"]["SD3ConditioningField"] | null; + /** + * @description Negative conditioning tensor + * @default null + */ + negative_conditioning?: components["schemas"]["SD3ConditioningField"] | null; + /** + * CFG Scale + * @description Classifier-Free Guidance scale + * @default 3.5 + */ + cfg_scale?: number | number[]; /** * Width - * @description Width of output (px) - * @default 512 + * @description Width of the generated image. + * @default 1024 */ width?: number; /** * Height - * @description Height of output (px) - * @default 512 + * @description Height of the generated image. + * @default 1024 */ height?: number; /** - * Use Cpu - * @description Use CPU for noise generation (for reproducible results across platforms) - * @default true + * Steps + * @description Number of steps to run + * @default 10 */ - use_cpu?: boolean; + steps?: number; + /** + * Seed + * @description Randomness seed for reproducibility.
+ * @default 0 + */ + seed?: number; /** * type - * @default noise + * @default sd3_denoise * @constant */ - type: "noise"; + type: "sd3_denoise"; }; /** - * NoiseOutput - * @description Invocation noise output + * Image to Latents - SD3 + * @description Generates latents from an image. */ - NoiseOutput: { - /** @description Noise tensor */ - noise: components["schemas"]["LatentsField"]; + SD3ImageToLatentsInvocation: { /** - * Width - * @description Width of output (px) + * @description The board to save the image to + * @default null */ - width: number; + board?: components["schemas"]["BoardField"] | null; /** - * Height - * @description Height of output (px) + * @description Optional metadata to be saved with the image + * @default null */ - height: number; + metadata?: components["schemas"]["MetadataField"] | null; + /** + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false + */ + is_intermediate?: boolean; + /** + * Use Cache + * @description Whether or not to use the cache + * @default true + */ + use_cache?: boolean; + /** + * @description The image to encode + * @default null + */ + image?: components["schemas"]["ImageField"] | null; + /** + * @description VAE + * @default null + */ + vae?: components["schemas"]["VAEField"] | null; /** * type - * @default noise_output + * @default sd3_i2l * @constant */ - type: "noise_output"; + type: "sd3_i2l"; }; /** - * Normal Map - * @description Generates a normal map. + * Latents to Image - SD3 + * @description Generates an image from latents. */ - NormalMapInvocation: { + SD3LatentsToImageInvocation: { /** * @description The board to save the image to * @default null @@ -17664,201 +21182,115 @@ export type components = { */ use_cache?: boolean; /** - * @description The image to process + * @description Latents tensor * @default null */ - image?: components["schemas"]["ImageField"] | null; - /** - * type - * @default normal_map - * @constant - */ - type: "normal_map"; - }; - /** OffsetPaginatedResults[BoardDTO] */ - OffsetPaginatedResults_BoardDTO_: { - /** - * Limit - * @description Limit of items to get - */ - limit: number; - /** - * Offset - * @description Offset from which to retrieve items - */ - offset: number; - /** - * Total - * @description Total number of items in result - */ - total: number; - /** - * Items - * @description Items - */ - items: components["schemas"]["BoardDTO"][]; - }; - /** OffsetPaginatedResults[ImageDTO] */ - OffsetPaginatedResults_ImageDTO_: { - /** - * Limit - * @description Limit of items to get - */ - limit: number; - /** - * Offset - * @description Offset from which to retrieve items - */ - offset: number; - /** - * Total - * @description Total number of items in result - */ - total: number; - /** - * Items - * @description Items - */ - items: components["schemas"]["ImageDTO"][]; - }; - /** OffsetPaginatedResults[VideoDTO] */ - OffsetPaginatedResults_VideoDTO_: { - /** - * Limit - * @description Limit of items to get - */ - limit: number; - /** - * Offset - * @description Offset from which to retrieve items - */ - offset: number; + latents?: components["schemas"]["LatentsField"] | null; /** - * Total - * @description Total number of items in result + * @description VAE + * @default null */ - total: number; + vae?: components["schemas"]["VAEField"] | null; /** - * Items - * @description Items + * type + * @default 
sd3_l2i + * @constant */ - items: components["schemas"]["VideoDTO"][]; + type: "sd3_l2i"; }; /** - * OutputFieldJSONSchemaExtra - * @description Extra attributes to be added to input fields and their OpenAPI schema. Used by the workflow editor - * during schema parsing and UI rendering. + * Prompt - SDXL + * @description Parse prompt using compel package to conditioning. */ - OutputFieldJSONSchemaExtra: { - field_kind: components["schemas"]["FieldKind"]; + SDXLCompelPromptInvocation: { /** - * Ui Hidden + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this is an intermediate invocation. * @default false */ - ui_hidden: boolean; + is_intermediate?: boolean; /** - * Ui Order - * @default null + * Use Cache + * @description Whether or not to use the cache + * @default true */ - ui_order: number | null; - /** @default null */ - ui_type: components["schemas"]["UIType"] | null; - }; - /** PaginatedResults[WorkflowRecordListItemWithThumbnailDTO] */ - PaginatedResults_WorkflowRecordListItemWithThumbnailDTO_: { + use_cache?: boolean; /** - * Page - * @description Current Page + * Prompt + * @description Prompt to be parsed by Compel to create a conditioning tensor + * @default */ - page: number; + prompt?: string; /** - * Pages - * @description Total number of pages + * Style + * @description Prompt to be parsed by Compel to create a conditioning tensor + * @default */ - pages: number; + style?: string; /** - * Per Page - * @description Number of items per page + * Original Width + * @default 1024 */ - per_page: number; + original_width?: number; /** - * Total - * @description Total number of items in result + * Original Height + * @default 1024 */ - total: number; + original_height?: number; /** - * Items - * @description Items + * Crop Top + * @default 0 */ - items: components["schemas"]["WorkflowRecordListItemWithThumbnailDTO"][]; - }; - /** - * Pair Tile with Image - * @description Pair an image with its tile properties. - */ - PairTileImageInvocation: { + crop_top?: number; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Crop Left + * @default 0 */ - id: string; + crop_left?: number; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Target Width + * @default 1024 */ - is_intermediate?: boolean; + target_width?: number; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Target Height + * @default 1024 */ - use_cache?: boolean; + target_height?: number; /** - * @description The tile image. + * CLIP 1 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count * @default null */ - image?: components["schemas"]["ImageField"] | null; + clip?: components["schemas"]["CLIPField"] | null; /** - * @description The tile properties. + * CLIP 2 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count * @default null */ - tile?: components["schemas"]["Tile"] | null; + clip2?: components["schemas"]["CLIPField"] | null; /** - * type - * @default pair_tile_image - * @constant + * @description A mask defining the region that this conditioning prompt applies to. + * @default null */ - type: "pair_tile_image"; - }; - /** PairTileImageOutput */ - PairTileImageOutput: { - /** @description A tile description with its corresponding image. 
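In the `sd3_denoise` schema above, `denoising_start` and `denoising_end` are fractions of the total schedule rather than step counts; a hedged sketch of a partial-denoise (image-to-image style) payload, with illustrative import path and values:

    import type { components } from "./schema";

    // Denoise only the final 40% of the schedule, as an img2img-style pass would.
    const denoise: components["schemas"]["SD3DenoiseInvocation"] = {
      id: "sd3_denoise_1",
      type: "sd3_denoise",
      denoising_start: 0.6,
      denoising_end: 1.0,
      cfg_scale: 3.5,
      steps: 28,
      width: 1024,
      height: 1024,
    };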
*/ - tile_with_image: components["schemas"]["TileWithImage"]; + mask?: components["schemas"]["TensorField"] | null; /** * type - * @default pair_tile_image_output + * @default sdxl_compel_prompt * @constant */ - type: "pair_tile_image_output"; + type: "sdxl_compel_prompt"; }; /** - * Paste Image into Bounding Box - * @description Paste the source image into the target image at the given bounding box. - * - * The source image must be the same size as the bounding box, and the bounding box must fit within the target image. + * Apply LoRA Collection - SDXL + * @description Applies a collection of SDXL LoRAs to the provided UNet and CLIP models. */ - PasteImageIntoBoundingBoxInvocation: { - /** - * @description The board to save the image to - * @default null - */ - board?: components["schemas"]["BoardField"] | null; - /** - * @description Optional metadata to be saved with the image - * @default null - */ - metadata?: components["schemas"]["MetadataField"] | null; + SDXLLoRACollectionLoader: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -17877,42 +21309,41 @@ export type components = { */ use_cache?: boolean; /** - * @description The image to paste + * LoRAs + * @description LoRA models and weights. May be a single LoRA or collection. * @default null */ - source_image?: components["schemas"]["ImageField"] | null; + loras?: components["schemas"]["LoRAField"] | components["schemas"]["LoRAField"][] | null; /** - * @description The image to paste into + * UNet + * @description UNet (scheduler, LoRAs) * @default null */ - target_image?: components["schemas"]["ImageField"] | null; + unet?: components["schemas"]["UNetField"] | null; /** - * @description The bounding box to paste the image into + * CLIP + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count * @default null */ - bounding_box?: components["schemas"]["BoundingBoxField"] | null; + clip?: components["schemas"]["CLIPField"] | null; + /** + * CLIP 2 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null + */ + clip2?: components["schemas"]["CLIPField"] | null; /** * type - * @default paste_image_into_bounding_box + * @default sdxl_lora_collection_loader * @constant */ - type: "paste_image_into_bounding_box"; + type: "sdxl_lora_collection_loader"; }; /** - * PiDiNet Edge Detection - * @description Generates an edge map using PiDiNet. + * Apply LoRA - SDXL + * @description Apply selected lora to unet and text_encoder. */ - PiDiNetEdgeDetectionInvocation: { - /** - * @description The board to save the image to - * @default null - */ - board?: components["schemas"]["BoardField"] | null; - /** - * @description Optional metadata to be saved with the image - * @default null - */ - metadata?: components["schemas"]["MetadataField"] | null; + SDXLLoRALoaderInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
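Because `loras` on the collection loader above is typed as a single `LoRAField` or an array, client code usually wants to normalize it first; a small sketch (the helper name and import path are illustrative):

    import type { components } from "./schema";

    type LoRAField = components["schemas"]["LoRAField"];
    type Loras = components["schemas"]["SDXLLoRACollectionLoader"]["loras"];

    // Collapse the single-or-array union (and null/undefined) to a plain array.
    const toLoraList = (loras: Loras): LoRAField[] =>
      loras == null ? [] : Array.isArray(loras) ? loras : [loras];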
@@ -17931,73 +21362,77 @@ export type components = { */ use_cache?: boolean; /** - * @description The image to process + * LoRA + * @description LoRA model to load * @default null */ - image?: components["schemas"]["ImageField"] | null; + lora?: components["schemas"]["ModelIdentifierField"] | null; /** - * Quantize Edges - * @description Whether or not to use safe mode - * @default false + * Weight + * @description The weight at which the LoRA is applied to each model + * @default 0.75 */ - quantize_edges?: boolean; + weight?: number; /** - * Scribble - * @description Whether or not to use scribble mode - * @default false + * UNet + * @description UNet (scheduler, LoRAs) + * @default null */ - scribble?: boolean; + unet?: components["schemas"]["UNetField"] | null; /** - * type - * @default pidi_edge_detection - * @constant + * CLIP 1 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null */ - type: "pidi_edge_detection"; - }; - /** PresetData */ - PresetData: { + clip?: components["schemas"]["CLIPField"] | null; /** - * Positive Prompt - * @description Positive prompt + * CLIP 2 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null */ - positive_prompt: string; + clip2?: components["schemas"]["CLIPField"] | null; /** - * Negative Prompt - * @description Negative prompt + * type + * @default sdxl_lora_loader + * @constant */ - negative_prompt: string; + type: "sdxl_lora_loader"; }; /** - * PresetType - * @enum {string} - */ - PresetType: "user" | "default" | "project"; - /** - * ProgressImage - * @description The progress image sent intermittently during processing + * SDXLLoRALoaderOutput + * @description SDXL LoRA Loader Output */ - ProgressImage: { + SDXLLoRALoaderOutput: { /** - * Width - * @description The effective width of the image in pixels + * UNet + * @description UNet (scheduler, LoRAs) + * @default null */ - width: number; + unet: components["schemas"]["UNetField"] | null; /** - * Height - * @description The effective height of the image in pixels + * CLIP 1 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null */ - height: number; + clip: components["schemas"]["CLIPField"] | null; /** - * Dataurl - * @description The image data as a b64 data URL + * CLIP 2 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null */ - dataURL: string; + clip2: components["schemas"]["CLIPField"] | null; + /** + * type + * @default sdxl_lora_loader_output + * @constant + */ + type: "sdxl_lora_loader_output"; }; /** - * Prompts from File - * @description Loads prompts from a text file + * Main Model - SDXL + * @description Loads an sdxl base model, outputting its submodels. */ - PromptsFromFileInvocation: { + SDXLModelLoaderInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
@@ -18016,194 +21451,196 @@ export type components = { */ use_cache?: boolean; /** - * File Path - * @description Path to prompt text file - * @default null - */ - file_path?: string | null; - /** - * Pre Prompt - * @description String to prepend to each prompt - * @default null - */ - pre_prompt?: string | null; - /** - * Post Prompt - * @description String to append to each prompt + * @description SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load * @default null */ - post_prompt?: string | null; - /** - * Start Line - * @description Line in the file to start start from - * @default 1 - */ - start_line?: number; - /** - * Max Prompts - * @description Max lines to read from file (0=all) - * @default 1 - */ - max_prompts?: number; + model?: components["schemas"]["ModelIdentifierField"] | null; /** * type - * @default prompt_from_file + * @default sdxl_model_loader * @constant */ - type: "prompt_from_file"; + type: "sdxl_model_loader"; }; /** - * PruneResult - * @description Result of pruning the session queue + * SDXLModelLoaderOutput + * @description SDXL base model loader output */ - PruneResult: { + SDXLModelLoaderOutput: { /** - * Deleted - * @description Number of queue items deleted + * UNet + * @description UNet (scheduler, LoRAs) */ - deleted: number; - }; - /** - * QueueClearedEvent - * @description Event model for queue_cleared - */ - QueueClearedEvent: { + unet: components["schemas"]["UNetField"]; /** - * Timestamp - * @description The timestamp of the event + * CLIP 1 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - timestamp: number; + clip: components["schemas"]["CLIPField"]; /** - * Queue Id - * @description The ID of the queue + * CLIP 2 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - queue_id: string; + clip2: components["schemas"]["CLIPField"]; + /** + * VAE + * @description VAE + */ + vae: components["schemas"]["VAEField"]; + /** + * type + * @default sdxl_model_loader_output + * @constant + */ + type: "sdxl_model_loader_output"; }; /** - * QueueItemStatusChangedEvent - * @description Event model for queue_item_status_changed + * Prompt - SDXL Refiner + * @description Parse prompt using compel package to conditioning. */ - QueueItemStatusChangedEvent: { + SDXLRefinerCompelPromptInvocation: { /** - * Timestamp - * @description The timestamp of the event + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - timestamp: number; + id: string; /** - * Queue Id - * @description The ID of the queue + * Is Intermediate + * @description Whether or not this is an intermediate invocation. 
+ * @default false */ - queue_id: string; + is_intermediate?: boolean; /** - * Item Id - * @description The ID of the queue item + * Use Cache + * @description Whether or not to use the cache + * @default true */ - item_id: number; + use_cache?: boolean; /** - * Batch Id - * @description The ID of the queue batch + * Style + * @description Prompt to be parsed by Compel to create a conditioning tensor + * @default */ - batch_id: string; + style?: string; /** - * Origin - * @description The origin of the queue item - * @default null + * Original Width + * @default 1024 */ - origin: string | null; + original_width?: number; /** - * Destination - * @description The destination of the queue item - * @default null + * Original Height + * @default 1024 */ - destination: string | null; + original_height?: number; /** - * Status - * @description The new status of the queue item - * @enum {string} + * Crop Top + * @default 0 */ - status: "pending" | "in_progress" | "completed" | "failed" | "canceled"; + crop_top?: number; /** - * Error Type - * @description The error type, if any - * @default null + * Crop Left + * @default 0 */ - error_type: string | null; + crop_left?: number; /** - * Error Message - * @description The error message, if any - * @default null + * Aesthetic Score + * @description The aesthetic score to apply to the conditioning tensor + * @default 6 */ - error_message: string | null; + aesthetic_score?: number; /** - * Error Traceback - * @description The error traceback, if any + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count * @default null */ - error_traceback: string | null; - /** - * Created At - * @description The timestamp when the queue item was created - */ - created_at: string; + clip2?: components["schemas"]["CLIPField"] | null; /** - * Updated At - * @description The timestamp when the queue item was last updated + * type + * @default sdxl_refiner_compel_prompt + * @constant */ - updated_at: string; + type: "sdxl_refiner_compel_prompt"; + }; + /** + * Refiner Model - SDXL + * @description Loads an sdxl refiner model, outputting its submodels. + */ + SDXLRefinerModelLoaderInvocation: { /** - * Started At - * @description The timestamp when the queue item was started - * @default null + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - started_at: string | null; + id: string; /** - * Completed At - * @description The timestamp when the queue item was completed - * @default null + * Is Intermediate + * @description Whether or not this is an intermediate invocation. 
+ * @default false */ - completed_at: string | null; - /** @description The status of the batch */ - batch_status: components["schemas"]["BatchStatus"]; - /** @description The status of the queue */ - queue_status: components["schemas"]["SessionQueueStatus"]; + is_intermediate?: boolean; /** - * Session Id - * @description The ID of the session (aka graph execution state) + * Use Cache + * @description Whether or not to use the cache + * @default true */ - session_id: string; + use_cache?: boolean; /** - * Credits - * @description The total credits used for this queue item + * @description SDXL Refiner Main Model (UNet, VAE, CLIP2) to load * @default null */ - credits: number | null; + model?: components["schemas"]["ModelIdentifierField"] | null; + /** + * type + * @default sdxl_refiner_model_loader + * @constant + */ + type: "sdxl_refiner_model_loader"; }; /** - * QueueItemsRetriedEvent - * @description Event model for queue_items_retried + * SDXLRefinerModelLoaderOutput + * @description SDXL refiner model loader output */ - QueueItemsRetriedEvent: { + SDXLRefinerModelLoaderOutput: { /** - * Timestamp - * @description The timestamp of the event + * UNet + * @description UNet (scheduler, LoRAs) */ - timestamp: number; + unet: components["schemas"]["UNetField"]; /** - * Queue Id - * @description The ID of the queue + * CLIP 2 + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - queue_id: string; + clip2: components["schemas"]["CLIPField"]; /** - * Retried Item Ids - * @description The IDs of the queue items that were retried + * VAE + * @description VAE */ - retried_item_ids: number[]; + vae: components["schemas"]["VAEField"]; + /** + * type + * @default sdxl_refiner_model_loader_output + * @constant + */ + type: "sdxl_refiner_model_loader_output"; }; /** - * Random Float - * @description Outputs a single random float + * SQLiteDirection + * @enum {string} */ - RandomFloatInvocation: { + SQLiteDirection: "ASC" | "DESC"; + /** + * Save Image + * @description Saves an image. Unlike an image primitive, this invocation stores a copy of the image. + */ + SaveImageInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -18222,35 +21659,22 @@ */ use_cache?: boolean; /** - * Low - * @description The inclusive low value - * @default 0 - */ - low?: number; - /** - * High - * @description The exclusive high value - * @default 1 - */ - high?: number; - /** - * Decimals - * @description The number of decimal places to round to - * @default 2 + * @description The image to process + * @default null */ - decimals?: number; + image?: components["schemas"]["ImageField"] | null; /** * type - * @default rand_float + * @default save_image * @constant */ - type: "rand_float"; + type: "save_image"; }; /** - * Random Integer - * @description Outputs a single random integer. + * Scale Latents + * @description Scales latents by a given factor. */ - RandomIntInvocation: { + ScaleLatentsInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
@@ -18265,33 +21689,45 @@ export type components = { /** * Use Cache * @description Whether or not to use the cache - * @default false + * @default true */ use_cache?: boolean; /** - * Low - * @description The inclusive low value - * @default 0 + * @description Latents tensor + * @default null */ - low?: number; + latents?: components["schemas"]["LatentsField"] | null; /** - * High - * @description The exclusive high value - * @default 2147483647 + * Scale Factor + * @description The factor by which to scale + * @default null */ - high?: number; + scale_factor?: number | null; + /** + * Mode + * @description Interpolation mode + * @default bilinear + * @enum {string} + */ + mode?: "nearest" | "linear" | "bilinear" | "bicubic" | "trilinear" | "area" | "nearest-exact"; + /** + * Antialias + * @description Whether or not to apply antialiasing (bilinear or bicubic only) + * @default false + */ + antialias?: boolean; /** * type - * @default rand_int + * @default lscale * @constant */ - type: "rand_int"; + type: "lscale"; }; /** - * Random Range - * @description Creates a collection of random numbers + * Scheduler + * @description Selects a scheduler. */ - RandomRangeInvocation: { + SchedulerInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -18306,45 +21742,49 @@ export type components = { /** * Use Cache * @description Whether or not to use the cache - * @default false + * @default true */ use_cache?: boolean; /** - * Low - * @description The inclusive low value - * @default 0 - */ - low?: number; - /** - * High - * @description The exclusive high value - * @default 2147483647 + * Scheduler + * @description Scheduler to use during inference + * @default euler + * @enum {string} */ - high?: number; + scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_3m" | "dpmpp_3m_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd"; /** - * Size - * @description The number of values to generate - * @default 1 + * type + * @default scheduler + * @constant */ - size?: number; + type: "scheduler"; + }; + /** SchedulerOutput */ + SchedulerOutput: { /** - * Seed - * @description The seed for the RNG (omit for random) - * @default 0 + * Scheduler + * @description Scheduler to use during inference + * @enum {string} */ - seed?: number; + scheduler: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_3m" | "dpmpp_3m_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd"; /** * type - * @default random_range + * @default scheduler_output * @constant */ - type: "random_range"; + type: "scheduler_output"; }; /** - * Integer Range - * @description Creates a range of numbers from start to stop with step + * SchedulerPredictionType + * @description Scheduler prediction type. + * @enum {string} */ - RangeInvocation: { + SchedulerPredictionType: "epsilon" | "v_prediction" | "sample"; + /** + * Main Model - SD3 + * @description Loads a SD3 base model, outputting its submodels. 
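The scheduler name union above is repeated verbatim in `SchedulerInvocation` and `SchedulerOutput`; deriving it once from the generated schema keeps client code in sync across regenerations. A sketch (import path illustrative):

    import type { components } from "./schema";

    // Single source of truth for scheduler names on the client.
    type SchedulerName = components["schemas"]["SchedulerOutput"]["scheduler"];

    const DEFAULT_SCHEDULER: SchedulerName = "euler";
    const isKarrasVariant = (s: SchedulerName): boolean => s.endsWith("_k");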
+ */ + Sd3ModelLoaderInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -18362,36 +21802,81 @@ export type components = { * @default true */ use_cache?: boolean; + /** @description SD3 model (MMDiTX) to load */ + model: components["schemas"]["ModelIdentifierField"]; + /** + * T5 Encoder + * @description T5 tokenizer and text encoder + * @default null + */ + t5_encoder_model?: components["schemas"]["ModelIdentifierField"] | null; + /** + * CLIP L Encoder + * @description CLIP Embed loader + * @default null + */ + clip_l_model?: components["schemas"]["ModelIdentifierField"] | null; + /** + * CLIP G Encoder + * @description CLIP-G Embed loader + * @default null + */ + clip_g_model?: components["schemas"]["ModelIdentifierField"] | null; + /** + * VAE + * @description VAE model to load + * @default null + */ + vae_model?: components["schemas"]["ModelIdentifierField"] | null; + /** + * type + * @default sd3_model_loader + * @constant + */ + type: "sd3_model_loader"; + }; + /** + * Sd3ModelLoaderOutput + * @description SD3 base model loader output. + */ + Sd3ModelLoaderOutput: { + /** + * Transformer + * @description Transformer + */ + transformer: components["schemas"]["TransformerField"]; + /** + * CLIP L + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + */ + clip_l: components["schemas"]["CLIPField"]; /** - * Start - * @description The start of the range - * @default 0 + * CLIP G + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count */ - start?: number; + clip_g: components["schemas"]["CLIPField"]; /** - * Stop - * @description The stop of the range - * @default 10 + * T5 Encoder + * @description T5 tokenizer and text encoder */ - stop?: number; + t5_encoder: components["schemas"]["T5EncoderField"]; /** - * Step - * @description The step of the range - * @default 1 + * VAE + * @description VAE */ - step?: number; + vae: components["schemas"]["VAEField"]; /** * type - * @default range + * @default sd3_model_loader_output * @constant */ - type: "range"; + type: "sd3_model_loader_output"; }; /** - * Integer Range of Size - * @description Creates a range from start to start + (size * step) incremented by step + * Prompt - SD3 + * @description Encodes and preps a prompt for a SD3 image. */ - RangeOfSizeInvocation: { + Sd3TextEncoderInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -18410,40 +21895,41 @@ export type components = { */ use_cache?: boolean; /** - * Start - * @description The start of the range - * @default 0 + * CLIP L + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null */ - start?: number; + clip_l?: components["schemas"]["CLIPField"] | null; /** - * Size - * @description The number of values - * @default 1 + * CLIP G + * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @default null */ - size?: number; + clip_g?: components["schemas"]["CLIPField"] | null; /** - * Step - * @description The step of the range - * @default 1 + * T5Encoder + * @description T5 tokenizer and text encoder + * @default null */ - step?: number; + t5_encoder?: components["schemas"]["T5EncoderField"] | null; + /** + * Prompt + * @description Text prompt to encode. 
+ * @default null + */ + prompt?: string | null; /** * type - * @default range_of_size + * @default sd3_text_encoder * @constant */ - type: "range_of_size"; + type: "sd3_text_encoder"; }; /** - * Create Rectangle Mask - * @description Create a rectangular mask. + * Apply Seamless - SD1.5, SDXL + * @description Applies the seamless transformation to the Model UNet and VAE. */ - RectangleMaskInvocation: { - /** - * @description Optional metadata to be saved with the image - * @default null - */ - metadata?: components["schemas"]["MetadataField"] | null; + SeamlessModeInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -18462,108 +21948,65 @@ export type components = { */ use_cache?: boolean; /** - * Width - * @description The width of the entire mask. - * @default null - */ - width?: number | null; - /** - * Height - * @description The height of the entire mask. - * @default null - */ - height?: number | null; - /** - * X Left - * @description The left x-coordinate of the rectangular masked region (inclusive). + * UNet + * @description UNet (scheduler, LoRAs) * @default null */ - x_left?: number | null; + unet?: components["schemas"]["UNetField"] | null; /** - * Y Top - * @description The top y-coordinate of the rectangular masked region (inclusive). + * VAE + * @description VAE model to load * @default null */ - y_top?: number | null; + vae?: components["schemas"]["VAEField"] | null; /** - * Rectangle Width - * @description The width of the rectangular masked region. - * @default null + * Seamless Y + * @description Specify whether Y axis is seamless + * @default true */ - rectangle_width?: number | null; + seamless_y?: boolean; /** - * Rectangle Height - * @description The height of the rectangular masked region. - * @default null + * Seamless X + * @description Specify whether X axis is seamless + * @default true */ - rectangle_height?: number | null; + seamless_x?: boolean; /** * type - * @default rectangle_mask + * @default seamless * @constant */ - type: "rectangle_mask"; + type: "seamless"; }; /** - * RemoteModelFile - * @description Information about a downloadable file that forms part of a model. 
+ * SeamlessModeOutput + * @description Modified Seamless Model output */ - RemoteModelFile: { - /** - * Url - * Format: uri - * @description The url to download this model file - */ - url: string; - /** - * Path - * Format: path - * @description The path to the file, relative to the model root - */ - path: string; - /** - * Size - * @description The size of this file, in bytes - * @default 0 - */ - size?: number | null; - /** - * Sha256 - * @description SHA256 hash of this model (not always available) - */ - sha256?: string | null; - }; - /** RemoveImagesFromBoardResult */ - RemoveImagesFromBoardResult: { - /** - * Affected Boards - * @description The ids of boards affected by the delete operation - */ - affected_boards: string[]; + SeamlessModeOutput: { /** - * Removed Images - * @description The image names that were removed from their board + * UNet + * @description UNet (scheduler, LoRAs) + * @default null */ - removed_images: string[]; - }; - /** RemoveVideosFromBoardResult */ - RemoveVideosFromBoardResult: { + unet: components["schemas"]["UNetField"] | null; /** - * Affected Boards - * @description The ids of boards affected by the delete operation + * VAE + * @description VAE + * @default null */ - affected_boards: string[]; + vae: components["schemas"]["VAEField"] | null; /** - * Removed Videos - * @description The video ids that were removed from their board + * type + * @default seamless_output + * @constant */ - removed_videos: string[]; + type: "seamless_output"; }; /** - * Resize Latents - * @description Resizes latents to explicit width/height (in pixels). Provided dimensions are floor-divided by 8. + * Segment Anything + * @description Runs a Segment Anything Model (SAM or SAM2). */ - ResizeLatentsInvocation: { + SegmentAnythingInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -18582,369 +22025,284 @@ export type components = { */ use_cache?: boolean; /** - * @description Latents tensor - * @default null - */ - latents?: components["schemas"]["LatentsField"] | null; - /** - * Width - * @description Width of output (px) + * Model + * @description The Segment Anything model to use (SAM or SAM2). * @default null */ - width?: number | null; + model?: ("segment-anything-base" | "segment-anything-large" | "segment-anything-huge" | "segment-anything-2-tiny" | "segment-anything-2-small" | "segment-anything-2-base" | "segment-anything-2-large") | null; /** - * Height - * @description Width of output (px) + * @description The image to segment. * @default null */ - height?: number | null; - /** - * Mode - * @description Interpolation mode - * @default bilinear - * @enum {string} - */ - mode?: "nearest" | "linear" | "bilinear" | "bicubic" | "trilinear" | "area" | "nearest-exact"; - /** - * Antialias - * @description Whether or not to apply antialiasing (bilinear or bicubic only) - * @default false - */ - antialias?: boolean; - /** - * type - * @default lresize - * @constant - */ - type: "lresize"; - }; - /** - * ResourceOrigin - * @description The origin of a resource (eg image). - * - * - INTERNAL: The resource was created by the application. - * - EXTERNAL: The resource was not created by the application. - * This may be a user-initiated upload, or an internal application upload (eg Canvas init image). 
- * @enum {string} - */ - ResourceOrigin: "internal" | "external"; - /** RetryItemsResult */ - RetryItemsResult: { - /** - * Queue Id - * @description The ID of the queue - */ - queue_id: string; - /** - * Retried Item Ids - * @description The IDs of the queue items that were retried - */ - retried_item_ids: number[]; - }; - /** - * Round Float - * @description Rounds a float to a specified number of decimal places. - */ - RoundInvocation: { - /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. - */ - id: string; + image?: components["schemas"]["ImageField"] | null; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Bounding Boxes + * @description The bounding boxes to prompt the model with. + * @default null */ - is_intermediate?: boolean; + bounding_boxes?: components["schemas"]["BoundingBoxField"][] | null; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Point Lists + * @description The list of point lists to prompt the model with. Each list of points represents a single object. + * @default null */ - use_cache?: boolean; + point_lists?: components["schemas"]["SAMPointsField"][] | null; /** - * Value - * @description The float value - * @default 0 + * Apply Polygon Refinement + * @description Whether to apply polygon refinement to the masks. This will smooth the edges of the masks slightly and ensure that each mask consists of a single closed polygon (before merging). + * @default true */ - value?: number; + apply_polygon_refinement?: boolean; /** - * Decimals - * @description The number of decimal places - * @default 0 + * Mask Filter + * @description The filtering to apply to the detected masks before merging them into a final output. 
+ * @default all + * @enum {string} */ - decimals?: number; + mask_filter?: "all" | "largest" | "highest_box_score"; /** * type - * @default round_float + * @default segment_anything * @constant */ - type: "round_float"; + type: "segment_anything"; }; - /** SAMPoint */ - SAMPoint: { + /** SessionProcessorStatus */ + SessionProcessorStatus: { /** - * X - * @description The x-coordinate of the point + * Is Started + * @description Whether the session processor is started */ - x: number; + is_started: boolean; /** - * Y - * @description The y-coordinate of the point + * Is Processing + * @description Whether a session is being processed */ - y: number; - /** @description The label of the point */ - label: components["schemas"]["SAMPointLabel"]; + is_processing: boolean; }; /** - * SAMPointLabel - * @enum {integer} + * SessionQueueAndProcessorStatus + * @description The overall status of session queue and processor */ - SAMPointLabel: -1 | 0 | 1; - /** SAMPointsField */ - SAMPointsField: { + SessionQueueAndProcessorStatus: { + queue: components["schemas"]["SessionQueueStatus"]; + processor: components["schemas"]["SessionProcessorStatus"]; + }; + /** SessionQueueCountsByDestination */ + SessionQueueCountsByDestination: { /** - * Points - * @description The points of the object + * Queue Id + * @description The ID of the queue */ - points: components["schemas"]["SAMPoint"][]; - }; - /** - * SD3ConditioningField - * @description A conditioning tensor primitive value - */ - SD3ConditioningField: { + queue_id: string; /** - * Conditioning Name - * @description The name of conditioning tensor + * Destination + * @description The destination of queue items included in this status */ - conditioning_name: string; - }; - /** - * SD3ConditioningOutput - * @description Base class for nodes that output a single SD3 conditioning tensor - */ - SD3ConditioningOutput: { - /** @description Conditioning tensor */ - conditioning: components["schemas"]["SD3ConditioningField"]; + destination: string; /** - * type - * @default sd3_conditioning_output - * @constant + * Pending + * @description Number of queue items with status 'pending' for the destination */ - type: "sd3_conditioning_output"; - }; - /** - * Denoise - SD3 - * @description Run denoising process with a SD3 model. - */ - SD3DenoiseInvocation: { + pending: number; /** - * @description The board to save the image to - * @default null + * In Progress + * @description Number of queue items with status 'in_progress' for the destination */ - board?: components["schemas"]["BoardField"] | null; + in_progress: number; /** - * @description Optional metadata to be saved with the image - * @default null + * Completed + * @description Number of queue items with status 'complete' for the destination */ - metadata?: components["schemas"]["MetadataField"] | null; + completed: number; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Failed + * @description Number of queue items with status 'error' for the destination */ - id: string; + failed: number; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. 
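For `segment_anything`, each `SAMPointsField` in `point_lists` prompts one object; the `SAMPointLabel` values appear to follow the usual SAM convention (1 foreground, 0 background, -1 padding), though the schema itself does not document this. A hedged payload sketch with illustrative id and coordinates:

    import type { components } from "./schema";

    const segment: components["schemas"]["SegmentAnythingInvocation"] = {
      id: "segment_anything_1",
      type: "segment_anything",
      model: "segment-anything-2-base",
      point_lists: [
        // One list of points per object to segment.
        { points: [{ x: 320, y: 240, label: 1 }, { x: 40, y: 40, label: 0 }] },
      ],
      apply_polygon_refinement: true,
      mask_filter: "largest",
    };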
- * @default false + * Canceled + * @description Number of queue items with status 'canceled' for the destination */ - is_intermediate?: boolean; + canceled: number; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Total + * @description Total number of queue items for the destination */ - use_cache?: boolean; + total: number; + }; + /** + * SessionQueueItem + * @description Session queue item without the full graph. Used for serialization. + */ + SessionQueueItem: { /** - * @description Latents tensor - * @default null + * Item Id + * @description The identifier of the session queue item */ - latents?: components["schemas"]["LatentsField"] | null; + item_id: number; /** - * @description A mask of the region to apply the denoising process to. Values of 0.0 represent the regions to be fully denoised, and 1.0 represent the regions to be preserved. - * @default null + * Status + * @description The status of this queue item + * @default pending + * @enum {string} */ - denoise_mask?: components["schemas"]["DenoiseMaskField"] | null; + status: "pending" | "in_progress" | "completed" | "failed" | "canceled"; /** - * Denoising Start - * @description When to start denoising, expressed a percentage of total steps + * Priority + * @description The priority of this queue item * @default 0 */ - denoising_start?: number; + priority: number; /** - * Denoising End - * @description When to stop denoising, expressed a percentage of total steps - * @default 1 + * Batch Id + * @description The ID of the batch associated with this queue item */ - denoising_end?: number; + batch_id: string; /** - * Transformer - * @description SD3 model (MMDiTX) to load - * @default null + * Origin + * @description The origin of this queue item. This data is used by the frontend to determine how to handle results. */ - transformer?: components["schemas"]["TransformerField"] | null; + origin?: string | null; /** - * @description Positive conditioning tensor - * @default null + * Destination + * @description The destination of this queue item. This data is used by the frontend to determine how to handle results */ - positive_conditioning?: components["schemas"]["SD3ConditioningField"] | null; + destination?: string | null; /** - * @description Negative conditioning tensor - * @default null + * Session Id + * @description The ID of the session associated with this queue item. The session doesn't exist in graph_executions until the queue item is executed. */ - negative_conditioning?: components["schemas"]["SD3ConditioningField"] | null; + session_id: string; /** - * CFG Scale - * @description Classifier-Free Guidance scale - * @default 3.5 + * Error Type + * @description The error type if this queue item errored */ - cfg_scale?: number | number[]; + error_type?: string | null; /** - * Width - * @description Width of the generated image. - * @default 1024 + * Error Message + * @description The error message if this queue item errored */ - width?: number; + error_message?: string | null; /** - * Height - * @description Height of the generated image. - * @default 1024 + * Error Traceback + * @description The error traceback if this queue item errored */ - height?: number; + error_traceback?: string | null; /** - * Steps - * @description Number of steps to run - * @default 10 + * Created At + * @description When this queue item was created */ - steps?: number; + created_at: string; /** - * Seed - * @description Randomness seed for reproducibility.
- * @default 0 + * Updated At + * @description When this queue item was updated */ - seed?: number; + updated_at: string; /** - * type - * @default sd3_denoise - * @constant + * Started At + * @description When this queue item was started */ - type: "sd3_denoise"; - }; - /** - * Image to Latents - SD3 - * @description Generates latents from an image. - */ - SD3ImageToLatentsInvocation: { + started_at?: string | null; /** - * @description The board to save the image to - * @default null + * Completed At + * @description When this queue item was completed */ - board?: components["schemas"]["BoardField"] | null; + completed_at?: string | null; /** - * @description Optional metadata to be saved with the image - * @default null + * Queue Id + * @description The id of the queue with which this item is associated */ - metadata?: components["schemas"]["MetadataField"] | null; + queue_id: string; + /** + * Field Values + * @description The field values that were used for this queue item + */ + field_values?: components["schemas"]["NodeFieldValue"][] | null; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Retried From Item Id + * @description The item_id of the queue item that this item was retried from */ - id: string; + retried_from_item_id?: number | null; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. + * Is Api Validation Run + * @description Whether this queue item is an API validation run. * @default false */ - is_intermediate?: boolean; + is_api_validation_run?: boolean; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Published Workflow Id + * @description The ID of the published workflow associated with this queue item */ - use_cache?: boolean; + published_workflow_id?: string | null; /** - * @description The image to encode - * @default null + * Credits + * @description The total credits used for this queue item */ - image?: components["schemas"]["ImageField"] | null; + credits?: number | null; + /** @description The fully-populated session to be executed */ + session: components["schemas"]["GraphExecutionState"]; + /** @description The workflow associated with this queue item */ + workflow?: components["schemas"]["WorkflowWithoutID"] | null; + }; + /** SessionQueueStatus */ + SessionQueueStatus: { /** - * @description VAE - * @default null + * Queue Id + * @description The ID of the queue */ - vae?: components["schemas"]["VAEField"] | null; + queue_id: string; /** - * type - * @default sd3_i2l - * @constant + * Item Id + * @description The current queue item id */ - type: "sd3_i2l"; - }; - /** - * Latents to Image - SD3 - * @description Generates an image from latents. - */ - SD3LatentsToImageInvocation: { + item_id: number | null; /** - * @description The board to save the image to - * @default null + * Batch Id + * @description The current queue item's batch id */ - board?: components["schemas"]["BoardField"] | null; + batch_id: string | null; /** - * @description Optional metadata to be saved with the image - * @default null + * Session Id + * @description The current queue item's session id */ - metadata?: components["schemas"]["MetadataField"] | null; + session_id: string | null; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
+ * Pending + * @description Number of queue items with status 'pending' */ - id: string; + pending: number; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * In Progress + * @description Number of queue items with status 'in_progress' */ - is_intermediate?: boolean; + in_progress: number; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Completed + * @description Number of queue items with status 'complete' */ - use_cache?: boolean; + completed: number; /** - * @description Latents tensor - * @default null + * Failed + * @description Number of queue items with status 'error' */ - latents?: components["schemas"]["LatentsField"] | null; + failed: number; /** - * @description VAE - * @default null + * Canceled + * @description Number of queue items with status 'canceled' */ - vae?: components["schemas"]["VAEField"] | null; + canceled: number; /** - * type - * @default sd3_l2i - * @constant + * Total + * @description Total number of queue items */ - type: "sd3_l2i"; + total: number; }; /** - * Prompt - SDXL - * @description Parse prompt using compel package to conditioning. + * Show Image + * @description Displays a provided image using the OS image viewer, and passes it forward in the pipeline. */ - SDXLCompelPromptInvocation: { + ShowImageInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -18963,129 +22321,110 @@ export type components = { */ use_cache?: boolean; /** - * Prompt - * @description Prompt to be parsed by Compel to create a conditioning tensor - * @default - */ - prompt?: string; - /** - * Style - * @description Prompt to be parsed by Compel to create a conditioning tensor - * @default - */ - style?: string; - /** - * Original Width - * @default 1024 - */ - original_width?: number; - /** - * Original Height - * @default 1024 - */ - original_height?: number; - /** - * Crop Top - * @default 0 - */ - crop_top?: number; - /** - * Crop Left - * @default 0 + * @description The image to show + * @default null */ - crop_left?: number; + image?: components["schemas"]["ImageField"] | null; /** - * Target Width - * @default 1024 + * type + * @default show_image + * @constant */ - target_width?: number; + type: "show_image"; + }; + /** + * SigLIP_Diffusers_Config + * @description Model config for SigLIP. + */ + SigLIP_Diffusers_Config: { /** - * Target Height - * @default 1024 + * Key + * @description A unique key for this model. */ - target_height?: number; + key: string; /** - * CLIP 1 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null + * Hash + * @description The hash of the model file(s). */ - clip?: components["schemas"]["CLIPField"] | null; + hash: string; /** - * CLIP 2 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. */ - clip2?: components["schemas"]["CLIPField"] | null; + path: string; /** - * @description A mask defining the region that this conditioning prompt applies to. - * @default null + * File Size + * @description The size of the model in bytes. */ - mask?: components["schemas"]["TensorField"] | null; + file_size: number; /** - * type - * @default sdxl_compel_prompt - * @constant + * Name + * @description Name of the model. 
*/ - type: "sdxl_compel_prompt"; - }; - /** - * Apply LoRA Collection - SDXL - * @description Applies a collection of SDXL LoRAs to the provided UNet and CLIP models. - */ - SDXLLoRACollectionLoader: { + name: string; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Description + * @description Model description */ - id: string; + description: string | null; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Source + * @description The original source of the model (path, URL or repo_id). */ - is_intermediate?: boolean; + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Source Api Response + * @description The original API response from the source, as stringified JSON. */ - use_cache?: boolean; + source_api_response: string | null; /** - * LoRAs - * @description LoRA models and weights. May be a single LoRA or collection. - * @default null + * Cover Image + * @description Url for image to preview model */ - loras?: components["schemas"]["LoRAField"] | components["schemas"]["LoRAField"][] | null; + cover_image: string | null; /** - * UNet - * @description UNet (scheduler, LoRAs) - * @default null + * Usage Info + * @description Usage information for this model */ - unet?: components["schemas"]["UNetField"] | null; + usage_info: string | null; /** - * CLIP - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null + * Format + * @default diffusers + * @constant */ - clip?: components["schemas"]["CLIPField"] | null; + format: "diffusers"; + /** @default */ + repo_variant: components["schemas"]["ModelRepoVariant"]; /** - * CLIP 2 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null + * Type + * @default siglip + * @constant */ - clip2?: components["schemas"]["CLIPField"] | null; + type: "siglip"; /** - * type - * @default sdxl_lora_collection_loader + * Base + * @default any * @constant */ - type: "sdxl_lora_collection_loader"; + base: "any"; }; /** - * Apply LoRA - SDXL - * @description Apply selected lora to unet and text_encoder. + * Image-to-Image (Autoscale) + * @description Run any spandrel image-to-image model (https://github.com/chaiNNer-org/spandrel) until the target scale is reached. */ - SDXLLoRALoaderInvocation: { + SpandrelImageToImageAutoscaleInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
@@ -19104,77 +22443,56 @@ export type components = { */ use_cache?: boolean; /** - * LoRA - * @description LoRA model to load + * @description The input image * @default null */ - lora?: components["schemas"]["ModelIdentifierField"] | null; + image?: components["schemas"]["ImageField"] | null; /** - * Weight - * @description The weight at which the LoRA is applied to each model - * @default 0.75 + * Image-to-Image Model + * @description Image-to-Image model + * @default null */ - weight?: number; + image_to_image_model?: components["schemas"]["ModelIdentifierField"] | null; /** - * UNet - * @description UNet (scheduler, LoRAs) - * @default null + * Tile Size + * @description The tile size for tiled image-to-image. Set to 0 to disable tiling. + * @default 512 */ - unet?: components["schemas"]["UNetField"] | null; + tile_size?: number; /** - * CLIP 1 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null + * type + * @default spandrel_image_to_image_autoscale + * @constant */ - clip?: components["schemas"]["CLIPField"] | null; + type: "spandrel_image_to_image_autoscale"; /** - * CLIP 2 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null + * Scale + * @description The final scale of the output image. If the model does not upscale the image, this will be ignored. + * @default 4 */ - clip2?: components["schemas"]["CLIPField"] | null; + scale?: number; /** - * type - * @default sdxl_lora_loader - * @constant + * Fit To Multiple Of 8 + * @description If true, the output image will be resized to the nearest multiple of 8 in both dimensions. + * @default false */ - type: "sdxl_lora_loader"; + fit_to_multiple_of_8?: boolean; }; /** - * SDXLLoRALoaderOutput - * @description SDXL LoRA Loader Output + * Image-to-Image + * @description Run any spandrel image-to-image model (https://github.com/chaiNNer-org/spandrel). */ - SDXLLoRALoaderOutput: { - /** - * UNet - * @description UNet (scheduler, LoRAs) - * @default null - */ - unet: components["schemas"]["UNetField"] | null; + SpandrelImageToImageInvocation: { /** - * CLIP 1 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @description The board to save the image to * @default null */ - clip: components["schemas"]["CLIPField"] | null; + board?: components["schemas"]["BoardField"] | null; /** - * CLIP 2 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * @description Optional metadata to be saved with the image * @default null */ - clip2: components["schemas"]["CLIPField"] | null; - /** - * type - * @default sdxl_lora_loader_output - * @constant - */ - type: "sdxl_lora_loader_output"; - }; - /** - * Main Model - SDXL - * @description Loads an sdxl base model, outputting its submodels. - */ - SDXLModelLoaderInvocation: { + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. 
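// Editor's note (illustrative only, not part of the generated file): a minimal
// sketch of the Spandrel autoscale node above as a typed payload, assuming the
// generated "components" types from this schema. The node id, image name and
// import path are made up for the example; a real ModelIdentifierField must be
// supplied before the node can run.
import type { components } from "./schema";

type SpandrelAutoscale = components["schemas"]["SpandrelImageToImageAutoscaleInvocation"];

const upscaleNode: SpandrelAutoscale = {
  id: "upscale_1", // must be unique within the graph
  type: "spandrel_image_to_image_autoscale",
  image: { image_name: "example.png" }, // hypothetical uploaded image
  image_to_image_model: null, // replace with a real ModelIdentifierField
  tile_size: 512, // 0 disables tiling
  scale: 4, // target overall scale; ignored if the model does not upscale
  fit_to_multiple_of_8: false,
};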
@@ -19193,230 +22511,220 @@ export type components = { */ use_cache?: boolean; /** - * @description SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load + * @description The input image * @default null */ - model?: components["schemas"]["ModelIdentifierField"] | null; - /** - * type - * @default sdxl_model_loader - * @constant - */ - type: "sdxl_model_loader"; - }; - /** - * SDXLModelLoaderOutput - * @description SDXL base model loader output - */ - SDXLModelLoaderOutput: { - /** - * UNet - * @description UNet (scheduler, LoRAs) - */ - unet: components["schemas"]["UNetField"]; - /** - * CLIP 1 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - */ - clip: components["schemas"]["CLIPField"]; + image?: components["schemas"]["ImageField"] | null; /** - * CLIP 2 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * Image-to-Image Model + * @description Image-to-Image model + * @default null */ - clip2: components["schemas"]["CLIPField"]; + image_to_image_model?: components["schemas"]["ModelIdentifierField"] | null; /** - * VAE - * @description VAE + * Tile Size + * @description The tile size for tiled image-to-image. Set to 0 to disable tiling. + * @default 512 */ - vae: components["schemas"]["VAEField"]; + tile_size?: number; /** * type - * @default sdxl_model_loader_output + * @default spandrel_image_to_image * @constant */ - type: "sdxl_model_loader_output"; + type: "spandrel_image_to_image"; }; /** - * Prompt - SDXL Refiner - * @description Parse prompt using compel package to conditioning. + * Spandrel_Checkpoint_Config + * @description Model config for Spandrel Image to Image models. */ - SDXLRefinerCompelPromptInvocation: { + Spandrel_Checkpoint_Config: { /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Key + * @description A unique key for this model. */ - id: string; + key: string; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Hash + * @description The hash of the model file(s). */ - is_intermediate?: boolean; + hash: string; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. */ - use_cache?: boolean; + path: string; /** - * Style - * @description Prompt to be parsed by Compel to create a conditioning tensor - * @default + * File Size + * @description The size of the model in bytes. */ - style?: string; + file_size: number; /** - * Original Width - * @default 1024 + * Name + * @description Name of the model. */ - original_width?: number; + name: string; /** - * Original Height - * @default 1024 + * Description + * @description Model description */ - original_height?: number; + description: string | null; /** - * Crop Top - * @default 0 + * Source + * @description The original source of the model (path, URL or repo_id). */ - crop_top?: number; + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; /** - * Crop Left - * @default 0 + * Source Api Response + * @description The original API response from the source, as stringified JSON. 
*/ - crop_left?: number; + source_api_response: string | null; /** - * Aesthetic Score - * @description The aesthetic score to apply to the conditioning tensor - * @default 6 + * Cover Image + * @description Url for image to preview model */ - aesthetic_score?: number; + cover_image: string | null; /** - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null + * Usage Info + * @description Usage information for this model */ - clip2?: components["schemas"]["CLIPField"] | null; + usage_info: string | null; /** - * type - * @default sdxl_refiner_compel_prompt + * Base + * @default any * @constant */ - type: "sdxl_refiner_compel_prompt"; - }; - /** - * Refiner Model - SDXL - * @description Loads an sdxl refiner model, outputting its submodels. - */ - SDXLRefinerModelLoaderInvocation: { - /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. - */ - id: string; - /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false - */ - is_intermediate?: boolean; - /** - * Use Cache - * @description Whether or not to use the cache - * @default true - */ - use_cache?: boolean; + base: "any"; /** - * @description SDXL Refiner Main Modde (UNet, VAE, CLIP2) to load - * @default null + * Type + * @default spandrel_image_to_image + * @constant */ - model?: components["schemas"]["ModelIdentifierField"] | null; + type: "spandrel_image_to_image"; /** - * type - * @default sdxl_refiner_model_loader + * Format + * @default checkpoint * @constant */ - type: "sdxl_refiner_model_loader"; + format: "checkpoint"; }; - /** - * SDXLRefinerModelLoaderOutput - * @description SDXL refiner model loader output - */ - SDXLRefinerModelLoaderOutput: { + /** StarredImagesResult */ + StarredImagesResult: { /** - * UNet - * @description UNet (scheduler, LoRAs) + * Affected Boards + * @description The ids of boards affected by the delete operation */ - unet: components["schemas"]["UNetField"]; + affected_boards: string[]; /** - * CLIP 2 - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count + * Starred Images + * @description The names of the images that were starred */ - clip2: components["schemas"]["CLIPField"]; + starred_images: string[]; + }; + /** StarredVideosResult */ + StarredVideosResult: { /** - * VAE - * @description VAE + * Affected Boards + * @description The ids of boards affected by the delete operation */ - vae: components["schemas"]["VAEField"]; + affected_boards: string[]; /** - * type - * @default sdxl_refiner_model_loader_output - * @constant + * Starred Videos + * @description The ids of the videos that were starred */ - type: "sdxl_refiner_model_loader_output"; + starred_videos: string[]; }; - /** - * SQLiteDirection - * @enum {string} - */ - SQLiteDirection: "ASC" | "DESC"; - /** - * Save Image - * @description Saves an image. Unlike an image primitive, this invocation stores a copy of the image. 
- */ - SaveImageInvocation: { + /** StarterModel */ + StarterModel: { + /** Description */ + description: string; + /** Source */ + source: string; + /** Name */ + name: string; + base: components["schemas"]["BaseModelType"]; + type: components["schemas"]["ModelType"]; + format?: components["schemas"]["ModelFormat"] | null; /** - * @description The board to save the image to - * @default null + * Is Installed + * @default false */ - board?: components["schemas"]["BoardField"] | null; + is_installed?: boolean; /** - * @description Optional metadata to be saved with the image - * @default null + * Previous Names + * @default [] */ - metadata?: components["schemas"]["MetadataField"] | null; + previous_names?: string[]; + /** Dependencies */ + dependencies?: components["schemas"]["StarterModelWithoutDependencies"][] | null; + }; + /** StarterModelBundle */ + StarterModelBundle: { + /** Name */ + name: string; + /** Models */ + models: components["schemas"]["StarterModel"][]; + }; + /** StarterModelResponse */ + StarterModelResponse: { + /** Starter Models */ + starter_models: components["schemas"]["StarterModel"][]; + /** Starter Bundles */ + starter_bundles: { + [key: string]: components["schemas"]["StarterModelBundle"]; + }; + }; + /** StarterModelWithoutDependencies */ + StarterModelWithoutDependencies: { + /** Description */ + description: string; + /** Source */ + source: string; + /** Name */ + name: string; + base: components["schemas"]["BaseModelType"]; + type: components["schemas"]["ModelType"]; + format?: components["schemas"]["ModelFormat"] | null; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Is Installed + * @default false */ - id: string; + is_installed?: boolean; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Previous Names + * @default [] */ - is_intermediate?: boolean; + previous_names?: string[]; + }; + /** + * String2Output + * @description Base class for invocations that output two strings + */ + String2Output: { /** - * Use Cache - * @description Whether or not to use the cache - * @default false + * String 1 + * @description string 1 */ - use_cache?: boolean; + string_1: string; /** - * @description The image to process - * @default null + * String 2 + * @description string 2 */ - image?: components["schemas"]["ImageField"] | null; + string_2: string; /** * type - * @default save_image + * @default string_2_output * @constant */ - type: "save_image"; + type: "string_2_output"; }; /** - * Scale Latents - * @description Scales latents by a given factor. + * String Batch + * @description Create a batched generation, where the workflow is executed once for each string in the batch. */ - ScaleLatentsInvocation: { + StringBatchInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -19435,41 +22743,30 @@ export type components = { */ use_cache?: boolean; /** - * @description Latents tensor - * @default null - */ - latents?: components["schemas"]["LatentsField"] | null; - /** - * Scale Factor - * @description The factor by which to scale - * @default null - */ - scale_factor?: number | null; - /** - * Mode - * @description Interpolation mode - * @default bilinear + * Batch Group + * @description The ID of this batch node's group. 
If provided, all batch nodes with the same ID will be 'zipped' before execution, and all nodes' collections must be of the same size. + * @default None * @enum {string} */ - mode?: "nearest" | "linear" | "bilinear" | "bicubic" | "trilinear" | "area" | "nearest-exact"; + batch_group_id?: "None" | "Group 1" | "Group 2" | "Group 3" | "Group 4" | "Group 5"; /** - * Antialias - * @description Whether or not to apply antialiasing (bilinear or bicubic only) - * @default false + * Strings + * @description The strings to batch over + * @default null */ - antialias?: boolean; + strings?: string[] | null; /** * type - * @default lscale + * @default string_batch * @constant */ - type: "lscale"; + type: "string_batch"; }; /** - * Scheduler - * @description Selects a scheduler. + * String Collection Primitive + * @description A collection of string primitive values */ - SchedulerInvocation: { + StringCollectionInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -19488,45 +22785,40 @@ */ use_cache?: boolean; /** - * Scheduler - * @description Scheduler to use during inference - * @default euler - * @enum {string} + * Collection + * @description The collection of string values + * @default [] */ - scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_3m" | "dpmpp_3m_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd"; + collection?: string[]; /** * type - * @default scheduler + * @default string_collection * @constant */ - type: "scheduler"; + type: "string_collection"; }; - /** SchedulerOutput */ - SchedulerOutput: { + /** + * StringCollectionOutput + * @description Base class for nodes that output a collection of strings + */ + StringCollectionOutput: { /** - * Scheduler - * @description Scheduler to use during inference - * @enum {string} + * Collection + * @description The output strings */ - scheduler: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_3m" | "dpmpp_3m_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd"; + collection: string[]; /** * type - * @default scheduler_output + * @default string_collection_output * @constant */ - type: "scheduler_output"; + type: "string_collection_output"; }; /** - * SchedulerPredictionType - * @description Scheduler prediction type. - * @enum {string} - */ - SchedulerPredictionType: "epsilon" | "v_prediction" | "sample"; - /** - * Main Model - SD3 - * @description Loads a SD3 base model, outputting its submodels. + * String Generator + * @description Generates a range of strings for use in a batched generation */ - Sd3ModelLoaderInvocation: { + StringGenerator: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
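// Editor's note (illustrative only, not part of the generated file): a sketch
// of the string batch node above using the generated "components" types. Batch
// nodes that share a batch_group_id other than "None" are 'zipped' and consumed
// in lockstep, so their collections must have equal lengths. The id, strings
// and import path below are made up for the example.
import type { components } from "./schema";

type StringBatch = components["schemas"]["StringBatchInvocation"];

const promptBatch: StringBatch = {
  id: "prompt_batch_1", // must be unique within the graph
  type: "string_batch",
  batch_group_id: "Group 1", // zipped with every other "Group 1" batch node
  strings: ["a red fox", "a grey wolf", "a brown bear"],
};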
@@ -19544,81 +22836,42 @@ export type components = { * @default true */ use_cache?: boolean; - /** @description SD3 model (MMDiTX) to load */ - model: components["schemas"]["ModelIdentifierField"]; - /** - * T5 Encoder - * @description T5 tokenizer and text encoder - * @default null - */ - t5_encoder_model?: components["schemas"]["ModelIdentifierField"] | null; - /** - * CLIP L Encoder - * @description CLIP Embed loader - * @default null - */ - clip_l_model?: components["schemas"]["ModelIdentifierField"] | null; - /** - * CLIP G Encoder - * @description CLIP-G Embed loader - * @default null - */ - clip_g_model?: components["schemas"]["ModelIdentifierField"] | null; /** - * VAE - * @description VAE model to load - * @default null + * Generator Type + * @description The string generator. */ - vae_model?: components["schemas"]["ModelIdentifierField"] | null; + generator: components["schemas"]["StringGeneratorField"]; /** * type - * @default sd3_model_loader + * @default string_generator * @constant */ - type: "sd3_model_loader"; + type: "string_generator"; }; + /** StringGeneratorField */ + StringGeneratorField: Record; /** - * Sd3ModelLoaderOutput - * @description SD3 base model loader output. - */ - Sd3ModelLoaderOutput: { - /** - * Transformer - * @description Transformer - */ - transformer: components["schemas"]["TransformerField"]; - /** - * CLIP L - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - */ - clip_l: components["schemas"]["CLIPField"]; - /** - * CLIP G - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - */ - clip_g: components["schemas"]["CLIPField"]; - /** - * T5 Encoder - * @description T5 tokenizer and text encoder - */ - t5_encoder: components["schemas"]["T5EncoderField"]; + * StringGeneratorOutput + * @description Base class for nodes that output a collection of strings + */ + StringGeneratorOutput: { /** - * VAE - * @description VAE + * Strings + * @description The generated strings */ - vae: components["schemas"]["VAEField"]; + strings: string[]; /** * type - * @default sd3_model_loader_output + * @default string_generator_output * @constant */ - type: "sd3_model_loader_output"; + type: "string_generator_output"; }; /** - * Prompt - SD3 - * @description Encodes and preps a prompt for a SD3 image. + * String Primitive + * @description A string primitive value */ - Sd3TextEncoderInvocation: { + StringInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -19637,41 +22890,23 @@ export type components = { */ use_cache?: boolean; /** - * CLIP L - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null - */ - clip_l?: components["schemas"]["CLIPField"] | null; - /** - * CLIP G - * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count - * @default null - */ - clip_g?: components["schemas"]["CLIPField"] | null; - /** - * T5Encoder - * @description T5 tokenizer and text encoder - * @default null - */ - t5_encoder?: components["schemas"]["T5EncoderField"] | null; - /** - * Prompt - * @description Text prompt to encode. - * @default null + * Value + * @description The string value + * @default */ - prompt?: string | null; + value?: string; /** * type - * @default sd3_text_encoder + * @default string * @constant */ - type: "sd3_text_encoder"; + type: "string"; }; /** - * Apply Seamless - SD1.5, SDXL - * @description Applies the seamless transformation to the Model UNet and VAE. 
+ * String Join + * @description Joins string left to string right */ - SeamlessModeInvocation: { + StringJoinInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -19690,65 +22925,29 @@ export type components = { */ use_cache?: boolean; /** - * UNet - * @description UNet (scheduler, LoRAs) - * @default null - */ - unet?: components["schemas"]["UNetField"] | null; - /** - * VAE - * @description VAE model to load - * @default null - */ - vae?: components["schemas"]["VAEField"] | null; - /** - * Seamless Y - * @description Specify whether Y axis is seamless - * @default true - */ - seamless_y?: boolean; - /** - * Seamless X - * @description Specify whether X axis is seamless - * @default true - */ - seamless_x?: boolean; - /** - * type - * @default seamless - * @constant - */ - type: "seamless"; - }; - /** - * SeamlessModeOutput - * @description Modified Seamless Model output - */ - SeamlessModeOutput: { - /** - * UNet - * @description UNet (scheduler, LoRAs) - * @default null + * String Left + * @description String Left + * @default */ - unet: components["schemas"]["UNetField"] | null; + string_left?: string; /** - * VAE - * @description VAE - * @default null + * String Right + * @description String Right + * @default */ - vae: components["schemas"]["VAEField"] | null; + string_right?: string; /** * type - * @default seamless_output + * @default string_join * @constant */ - type: "seamless_output"; + type: "string_join"; }; /** - * Segment Anything - * @description Runs a Segment Anything Model (SAM or SAM2). + * String Join Three + * @description Joins string left to string middle to string right */ - SegmentAnythingInvocation: { + StringJoinThreeInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -19767,284 +22966,312 @@ export type components = { */ use_cache?: boolean; /** - * Model - * @description The Segment Anything model to use (SAM or SAM2). - * @default null - */ - model?: ("segment-anything-base" | "segment-anything-large" | "segment-anything-huge" | "segment-anything-2-tiny" | "segment-anything-2-small" | "segment-anything-2-base" | "segment-anything-2-large") | null; - /** - * @description The image to segment. - * @default null - */ - image?: components["schemas"]["ImageField"] | null; - /** - * Bounding Boxes - * @description The bounding boxes to prompt the model with. - * @default null - */ - bounding_boxes?: components["schemas"]["BoundingBoxField"][] | null; - /** - * Point Lists - * @description The list of point lists to prompt the model with. Each list of points represents a single object. - * @default null + * String Left + * @description String Left + * @default */ - point_lists?: components["schemas"]["SAMPointsField"][] | null; + string_left?: string; /** - * Apply Polygon Refinement - * @description Whether to apply polygon refinement to the masks. This will smooth the edges of the masks slightly and ensure that each mask consists of a single closed polygon (before merging). - * @default true + * String Middle + * @description String Middle + * @default */ - apply_polygon_refinement?: boolean; + string_middle?: string; /** - * Mask Filter - * @description The filtering to apply to the detected masks before merging them into a final output. 
- * @default all - * @enum {string} + * String Right + * @description String Right + * @default */ - mask_filter?: "all" | "largest" | "highest_box_score"; + string_right?: string; /** * type - * @default segment_anything + * @default string_join_three * @constant */ - type: "segment_anything"; + type: "string_join_three"; }; - /** SessionProcessorStatus */ - SessionProcessorStatus: { + /** + * StringOutput + * @description Base class for nodes that output a single string + */ + StringOutput: { /** - * Is Started - * @description Whether the session processor is started + * Value + * @description The output string */ - is_started: boolean; + value: string; /** - * Is Processing - * @description Whether a session is being processed + * type + * @default string_output + * @constant */ - is_processing: boolean; + type: "string_output"; }; /** - * SessionQueueAndProcessorStatus - * @description The overall status of session queue and processor + * StringPosNegOutput + * @description Base class for invocations that output a positive and negative string */ - SessionQueueAndProcessorStatus: { - queue: components["schemas"]["SessionQueueStatus"]; - processor: components["schemas"]["SessionProcessorStatus"]; - }; - /** SessionQueueCountsByDestination */ - SessionQueueCountsByDestination: { - /** - * Queue Id - * @description The ID of the queue - */ - queue_id: string; - /** - * Destination - * @description The destination of queue items included in this status - */ - destination: string; - /** - * Pending - * @description Number of queue items with status 'pending' for the destination - */ - pending: number; + StringPosNegOutput: { /** - * In Progress - * @description Number of queue items with status 'in_progress' for the destination + * Positive String + * @description Positive string */ - in_progress: number; + positive_string: string; /** - * Completed - * @description Number of queue items with status 'complete' for the destination + * Negative String + * @description Negative string */ - completed: number; + negative_string: string; /** - * Failed - * @description Number of queue items with status 'error' for the destination + * type + * @default string_pos_neg_output + * @constant */ - failed: number; + type: "string_pos_neg_output"; + }; + /** + * String Replace + * @description Replaces the search string with the replace string + */ + StringReplaceInvocation: { /** - * Canceled - * @description Number of queue items with status 'canceled' for the destination + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - canceled: number; + id: string; /** - * Total - * @description Total number of queue items for the destination + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - total: number; - }; - /** - * SessionQueueItem - * @description Session queue item without the full graph. Used for serialization. 
- */ - SessionQueueItem: { + is_intermediate?: boolean; /** - * Item Id - * @description The identifier of the session queue item + * Use Cache + * @description Whether or not to use the cache + * @default true */ - item_id: number; + use_cache?: boolean; /** - * Status - * @description The status of this queue item - * @default pending - * @enum {string} + * String + * @description String to work on + * @default */ - status: "pending" | "in_progress" | "completed" | "failed" | "canceled"; + string?: string; /** - * Priority - * @description The priority of this queue item - * @default 0 + * Search String + * @description String to search for + * @default */ - priority: number; + search_string?: string; /** - * Batch Id - * @description The ID of the batch associated with this queue item + * Replace String + * @description String to replace the search + * @default */ - batch_id: string; + replace_string?: string; /** - * Origin - * @description The origin of this queue item. This data is used by the frontend to determine how to handle results. + * Use Regex + * @description Use search string as a regex expression (non regex is case insensitive) + * @default false */ - origin?: string | null; + use_regex?: boolean; /** - * Destination - * @description The origin of this queue item. This data is used by the frontend to determine how to handle results + * type + * @default string_replace + * @constant */ - destination?: string | null; + type: "string_replace"; + }; + /** + * String Split + * @description Splits string into two strings, based on the first occurrence of the delimiter. The delimiter will be removed from the string + */ + StringSplitInvocation: { /** - * Session Id - * @description The ID of the session associated with this queue item. The session doesn't exist in graph_executions until the queue item is executed. + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - session_id: string; + id: string; /** - * Error Type - * @description The error type if this queue item errored + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - error_type?: string | null; + is_intermediate?: boolean; /** - * Error Message - * @description The error message if this queue item errored + * Use Cache + * @description Whether or not to use the cache + * @default true */ - error_message?: string | null; + use_cache?: boolean; /** - * Error Traceback - * @description The error traceback if this queue item errored + * String + * @description String to split + * @default */ - error_traceback?: string | null; + string?: string; /** - * Created At - * @description When this queue item was created + * Delimiter + * @description Delimiter to split with. Blank will split on the first whitespace + * @default */ - created_at: string; + delimiter?: string; /** - * Updated At - * @description When this queue item was updated + * type + * @default string_split + * @constant */ - updated_at: string; + type: "string_split"; + }; + /** + * String Split Negative + * @description Splits string into two strings, inside [] goes into negative string, everything else goes into positive string. Each [ and ] character is replaced with a space + */ + StringSplitNegInvocation: { /** - * Started At - * @description When this queue item was started + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
*/ - started_at?: string | null; + id: string; /** - * Completed At - * @description When this queue item was completed + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - completed_at?: string | null; + is_intermediate?: boolean; /** - * Queue Id - * @description The id of the queue with which this item is associated + * Use Cache + * @description Whether or not to use the cache + * @default true */ - queue_id: string; + use_cache?: boolean; /** - * Field Values - * @description The field values that were used for this queue item + * String + * @description String to split + * @default */ - field_values?: components["schemas"]["NodeFieldValue"][] | null; + string?: string; /** - * Retried From Item Id - * @description The item_id of the queue item that this item was retried from + * type + * @default string_split_neg + * @constant */ - retried_from_item_id?: number | null; + type: "string_split_neg"; + }; + /** StylePresetRecordWithImage */ + StylePresetRecordWithImage: { /** - * Is Api Validation Run - * @description Whether this queue item is an API validation run. - * @default false + * Name + * @description The name of the style preset. */ - is_api_validation_run?: boolean; + name: string; + /** @description The preset data */ + preset_data: components["schemas"]["PresetData"]; + /** @description The type of style preset */ + type: components["schemas"]["PresetType"]; /** - * Published Workflow Id - * @description The ID of the published workflow associated with this queue item + * Id + * @description The style preset ID. */ - published_workflow_id?: string | null; + id: string; /** - * Credits - * @description The total credits used for this queue item + * Image + * @description The path for image */ - credits?: number | null; - /** @description The fully-populated session to be executed */ - session: components["schemas"]["GraphExecutionState"]; - /** @description The workflow associated with this queue item */ - workflow?: components["schemas"]["WorkflowWithoutID"] | null; + image: string | null; }; - /** SessionQueueStatus */ - SessionQueueStatus: { + /** + * SubModelType + * @description Submodel type. + * @enum {string} + */ + SubModelType: "unet" | "transformer" | "text_encoder" | "text_encoder_2" | "text_encoder_3" | "tokenizer" | "tokenizer_2" | "tokenizer_3" | "vae" | "vae_decoder" | "vae_encoder" | "scheduler" | "safety_checker"; + /** SubmodelDefinition */ + SubmodelDefinition: { + /** Path Or Prefix */ + path_or_prefix: string; + model_type: components["schemas"]["ModelType"]; + /** Variant */ + variant?: components["schemas"]["ModelVariantType"] | components["schemas"]["ClipVariantType"] | components["schemas"]["FluxVariantType"] | null; + }; + /** + * Subtract Integers + * @description Subtracts two numbers + */ + SubtractInvocation: { /** - * Queue Id - * @description The ID of the queue + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - queue_id: string; + id: string; /** - * Item Id - * @description The current queue item id + * Is Intermediate + * @description Whether or not this is an intermediate invocation. 
+ * @default false */ - item_id: number | null; + is_intermediate?: boolean; /** - * Batch Id - * @description The current queue item's batch id + * Use Cache + * @description Whether or not to use the cache + * @default true */ - batch_id: string | null; + use_cache?: boolean; /** - * Session Id - * @description The current queue item's session id + * A + * @description The first number + * @default 0 */ - session_id: string | null; + a?: number; /** - * Pending - * @description Number of queue items with status 'pending' + * B + * @description The second number + * @default 0 */ - pending: number; + b?: number; /** - * In Progress - * @description Number of queue items with status 'in_progress' + * type + * @default sub + * @constant */ - in_progress: number; + type: "sub"; + }; + /** T2IAdapterField */ + T2IAdapterField: { + /** @description The T2I-Adapter image prompt. */ + image: components["schemas"]["ImageField"]; + /** @description The T2I-Adapter model to use. */ + t2i_adapter_model: components["schemas"]["ModelIdentifierField"]; /** - * Completed - * @description Number of queue items with status 'complete' + * Weight + * @description The weight given to the T2I-Adapter + * @default 1 */ - completed: number; + weight?: number | number[]; /** - * Failed - * @description Number of queue items with status 'error' + * Begin Step Percent + * @description When the T2I-Adapter is first applied (% of total steps) + * @default 0 */ - failed: number; + begin_step_percent?: number; /** - * Canceled - * @description Number of queue items with status 'canceled' + * End Step Percent + * @description When the T2I-Adapter is last applied (% of total steps) + * @default 1 */ - canceled: number; + end_step_percent?: number; /** - * Total - * @description Total number of queue items + * Resize Mode + * @description The resize mode to use + * @default just_resize + * @enum {string} */ - total: number; + resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple"; }; /** - * Show Image - * @description Displays a provided image using the OS image viewer, and passes it forward in the pipeline. + * T2I-Adapter - SD1.5, SDXL + * @description Collects T2I-Adapter info to pass to other nodes. */ - ShowImageInvocation: { + T2IAdapterInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -20063,22 +23290,101 @@ */ use_cache?: boolean; /** - * @description The image to show + * @description The T2I-Adapter image prompt. * @default null */ image?: components["schemas"]["ImageField"] | null; + /** + * T2I-Adapter Model + * @description The T2I-Adapter model. + * @default null + */ + t2i_adapter_model?: components["schemas"]["ModelIdentifierField"] | null; + /** + * Weight + * @description The weight given to the T2I-Adapter + * @default 1 + */ + weight?: number | number[]; + /** + * Begin Step Percent + * @description When the T2I-Adapter is first applied (% of total steps) + * @default 0 + */ + begin_step_percent?: number; + /** + * End Step Percent + * @description When the T2I-Adapter is last applied (% of total steps) + * @default 1 + */ + end_step_percent?: number; + /** + * Resize Mode + * @description The resize mode applied to the T2I-Adapter input image so that it matches the target output size.
+ * @default just_resize + * @enum {string} + */ + resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple"; /** * type - * @default show_image + * @default t2i_adapter * @constant */ - type: "show_image"; + type: "t2i_adapter"; }; - /** - * SigLIPConfig - * @description Model config for SigLIP. - */ - SigLIPConfig: { + /** T2IAdapterMetadataField */ + T2IAdapterMetadataField: { + /** @description The control image. */ + image: components["schemas"]["ImageField"]; + /** + * @description The control image, after processing. + * @default null + */ + processed_image?: components["schemas"]["ImageField"] | null; + /** @description The T2I-Adapter model to use. */ + t2i_adapter_model: components["schemas"]["ModelIdentifierField"]; + /** + * Weight + * @description The weight given to the T2I-Adapter + * @default 1 + */ + weight?: number | number[]; + /** + * Begin Step Percent + * @description When the T2I-Adapter is first applied (% of total steps) + * @default 0 + */ + begin_step_percent?: number; + /** + * End Step Percent + * @description When the T2I-Adapter is last applied (% of total steps) + * @default 1 + */ + end_step_percent?: number; + /** + * Resize Mode + * @description The resize mode to use + * @default just_resize + * @enum {string} + */ + resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple"; + }; + /** T2IAdapterOutput */ + T2IAdapterOutput: { + /** + * T2I Adapter + * @description T2I-Adapter(s) to apply + */ + t2i_adapter: components["schemas"]["T2IAdapterField"]; + /** + * type + * @default t2i_adapter_output + * @constant + */ + type: "t2i_adapter_output"; + }; + /** T2IAdapter_Diffusers_SD1_Config */ + T2IAdapter_Diffusers_SD1_Config: { /** * Key * @description A unique key for this model. @@ -20105,19 +23411,10 @@ export type components = { */ name: string; /** - * Type - * @default siglip - * @constant - */ - type: "siglip"; - /** - * Format - * @default diffusers - * @constant + * Description + * @description Model description */ - format: "diffusers"; - /** @description The base model. */ - base: components["schemas"]["BaseModelType"]; + description: string | null; /** * Source * @description The original source of the model (path, URL or repo_id). @@ -20125,109 +23422,45 @@ export type components = { source: string; /** @description The type of source */ source_type: components["schemas"]["ModelSourceType"]; - /** - * Description - * @description Model description - */ - description?: string | null; /** * Source Api Response * @description The original API response from the source, as stringified JSON. */ - source_api_response?: string | null; + source_api_response: string | null; /** * Cover Image * @description Url for image to preview model */ - cover_image?: string | null; - /** - * Submodels - * @description Loadable submodels in this model - */ - submodels?: { - [key: string]: components["schemas"]["SubmodelDefinition"]; - } | null; + cover_image: string | null; /** * Usage Info * @description Usage information for this model */ - usage_info?: string | null; - /** @default */ - repo_variant?: components["schemas"]["ModelRepoVariant"] | null; - }; - /** - * Image-to-Image (Autoscale) - * @description Run any spandrel image-to-image model (https://github.com/chaiNNer-org/spandrel) until the target scale is reached. 
- */ - SpandrelImageToImageAutoscaleInvocation: { - /** - * @description The board to save the image to - * @default null - */ - board?: components["schemas"]["BoardField"] | null; - /** - * @description Optional metadata to be saved with the image - * @default null - */ - metadata?: components["schemas"]["MetadataField"] | null; - /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. - */ - id: string; - /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false - */ - is_intermediate?: boolean; - /** - * Use Cache - * @description Whether or not to use the cache - * @default true - */ - use_cache?: boolean; - /** - * @description The input image - * @default null - */ - image?: components["schemas"]["ImageField"] | null; - /** - * Image-to-Image Model - * @description Image-to-Image model - * @default null - */ - image_to_image_model?: components["schemas"]["ModelIdentifierField"] | null; - /** - * Tile Size - * @description The tile size for tiled image-to-image. Set to 0 to disable tiling. - * @default 512 - */ - tile_size?: number; + usage_info: string | null; /** - * type - * @default spandrel_image_to_image_autoscale + * Format + * @default diffusers * @constant */ - type: "spandrel_image_to_image_autoscale"; + format: "diffusers"; + /** @default */ + repo_variant: components["schemas"]["ModelRepoVariant"]; /** - * Scale - * @description The final scale of the output image. If the model does not upscale the image, this will be ignored. - * @default 4 + * Type + * @default t2i_adapter + * @constant */ - scale?: number; + type: "t2i_adapter"; + default_settings: components["schemas"]["ControlAdapterDefaultSettings"] | null; /** - * Fit To Multiple Of 8 - * @description If true, the output image will be resized to the nearest multiple of 8 in both dimensions. - * @default false + * Base + * @default sd-1 + * @constant */ - fit_to_multiple_of_8?: boolean; + base: "sd-1"; }; - /** - * SpandrelImageToImageConfig - * @description Model config for Spandrel Image to Image models. - */ - SpandrelImageToImageConfig: { + /** T2IAdapter_Diffusers_SDXL_Config */ + T2IAdapter_Diffusers_SDXL_Config: { /** * Key * @description A unique key for this model. @@ -20254,19 +23487,10 @@ export type components = { */ name: string; /** - * Type - * @default spandrel_image_to_image - * @constant - */ - type: "spandrel_image_to_image"; - /** - * Format - * @default checkpoint - * @constant + * Description + * @description Model description */ - format: "checkpoint"; - /** @description The base model. */ - base: components["schemas"]["BaseModelType"]; + description: string | null; /** * Source * @description The original source of the model (path, URL or repo_id). @@ -20274,720 +23498,586 @@ export type components = { source: string; /** @description The type of source */ source_type: components["schemas"]["ModelSourceType"]; - /** - * Description - * @description Model description - */ - description?: string | null; /** * Source Api Response * @description The original API response from the source, as stringified JSON. 
*/ - source_api_response?: string | null; + source_api_response: string | null; /** * Cover Image * @description Url for image to preview model */ - cover_image?: string | null; - /** - * Submodels - * @description Loadable submodels in this model - */ - submodels?: { - [key: string]: components["schemas"]["SubmodelDefinition"]; - } | null; + cover_image: string | null; /** * Usage Info * @description Usage information for this model */ - usage_info?: string | null; + usage_info: string | null; + /** + * Format + * @default diffusers + * @constant + */ + format: "diffusers"; + /** @default */ + repo_variant: components["schemas"]["ModelRepoVariant"]; + /** + * Type + * @default t2i_adapter + * @constant + */ + type: "t2i_adapter"; + default_settings: components["schemas"]["ControlAdapterDefaultSettings"] | null; + /** + * Base + * @default sdxl + * @constant + */ + base: "sdxl"; + }; + /** T5EncoderField */ + T5EncoderField: { + /** @description Info to load tokenizer submodel */ + tokenizer: components["schemas"]["ModelIdentifierField"]; + /** @description Info to load text_encoder submodel */ + text_encoder: components["schemas"]["ModelIdentifierField"]; + /** + * Loras + * @description LoRAs to apply on model loading + */ + loras: components["schemas"]["LoRAField"][]; }; /** - * Image-to-Image - * @description Run any spandrel image-to-image model (https://github.com/chaiNNer-org/spandrel). + * T5Encoder_BnBLLMint8_Config + * @description Configuration for T5 Encoder models quantized by bitsandbytes' LLM.int8. */ - SpandrelImageToImageInvocation: { + T5Encoder_BnBLLMint8_Config: { /** - * @description The board to save the image to - * @default null + * Key + * @description A unique key for this model. */ - board?: components["schemas"]["BoardField"] | null; + key: string; /** - * @description Optional metadata to be saved with the image - * @default null + * Hash + * @description The hash of the model file(s). */ - metadata?: components["schemas"]["MetadataField"] | null; + hash: string; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. */ - id: string; + path: string; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * File Size + * @description The size of the model in bytes. */ - is_intermediate?: boolean; + file_size: number; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Name + * @description Name of the model. */ - use_cache?: boolean; + name: string; /** - * @description The input image - * @default null + * Description + * @description Model description */ - image?: components["schemas"]["ImageField"] | null; + description: string | null; /** - * Image-to-Image Model - * @description Image-to-Image model - * @default null + * Source + * @description The original source of the model (path, URL or repo_id). */ - image_to_image_model?: components["schemas"]["ModelIdentifierField"] | null; + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; /** - * Tile Size - * @description The tile size for tiled image-to-image. Set to 0 to disable tiling. - * @default 512 + * Source Api Response + * @description The original API response from the source, as stringified JSON. 
*/ - tile_size?: number; + source_api_response: string | null; /** - * type - * @default spandrel_image_to_image - * @constant + * Cover Image + * @description Url for image to preview model */ - type: "spandrel_image_to_image"; - }; - /** StarredImagesResult */ - StarredImagesResult: { + cover_image: string | null; /** - * Affected Boards - * @description The ids of boards affected by the delete operation + * Usage Info + * @description Usage information for this model */ - affected_boards: string[]; + usage_info: string | null; /** - * Starred Images - * @description The names of the images that were starred + * Base + * @default any + * @constant */ - starred_images: string[]; - }; - /** StarredVideosResult */ - StarredVideosResult: { + base: "any"; /** - * Affected Boards - * @description The ids of boards affected by the delete operation + * Type + * @default t5_encoder + * @constant */ - affected_boards: string[]; + type: "t5_encoder"; /** - * Starred Videos - * @description The ids of the videos that were starred + * Format + * @default bnb_quantized_int8b + * @constant */ - starred_videos: string[]; + format: "bnb_quantized_int8b"; }; - /** StarterModel */ - StarterModel: { - /** Description */ - description: string; - /** Source */ - source: string; - /** Name */ - name: string; - base: components["schemas"]["BaseModelType"]; - type: components["schemas"]["ModelType"]; - format?: components["schemas"]["ModelFormat"] | null; + /** + * T5Encoder_T5Encoder_Config + * @description Configuration for T5 Encoder models in a bespoke, diffusers-like format. The model weights are expected to be in + * a folder called text_encoder_2 inside the model directory, with a config file named model.safetensors.index.json. + */ + T5Encoder_T5Encoder_Config: { /** - * Is Installed - * @default false + * Key + * @description A unique key for this model. */ - is_installed?: boolean; + key: string; /** - * Previous Names - * @default [] + * Hash + * @description The hash of the model file(s). */ - previous_names?: string[]; - /** Dependencies */ - dependencies?: components["schemas"]["StarterModelWithoutDependencies"][] | null; - }; - /** StarterModelBundle */ - StarterModelBundle: { - /** Name */ - name: string; - /** Models */ - models: components["schemas"]["StarterModel"][]; - }; - /** StarterModelResponse */ - StarterModelResponse: { - /** Starter Models */ - starter_models: components["schemas"]["StarterModel"][]; - /** Starter Bundles */ - starter_bundles: { - [key: string]: components["schemas"]["StarterModelBundle"]; - }; - }; - /** StarterModelWithoutDependencies */ - StarterModelWithoutDependencies: { - /** Description */ - description: string; - /** Source */ - source: string; - /** Name */ - name: string; - base: components["schemas"]["BaseModelType"]; - type: components["schemas"]["ModelType"]; - format?: components["schemas"]["ModelFormat"] | null; + hash: string; /** - * Is Installed - * @default false + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. */ - is_installed?: boolean; + path: string; /** - * Previous Names - * @default [] + * File Size + * @description The size of the model in bytes. */ - previous_names?: string[]; - }; - /** - * String2Output - * @description Base class for invocations that output two strings - */ - String2Output: { + file_size: number; /** - * String 1 - * @description string 1 + * Name + * @description Name of the model. 
*/ - string_1: string; + name: string; /** - * String 2 - * @description string 2 + * Description + * @description Model description */ - string_2: string; + description: string | null; /** - * type - * @default string_2_output - * @constant + * Source + * @description The original source of the model (path, URL or repo_id). */ - type: "string_2_output"; - }; - /** - * String Batch - * @description Create a batched generation, where the workflow is executed once for each string in the batch. - */ - StringBatchInvocation: { + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Source Api Response + * @description The original API response from the source, as stringified JSON. */ - id: string; + source_api_response: string | null; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Cover Image + * @description Url for image to preview model */ - is_intermediate?: boolean; + cover_image: string | null; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Usage Info + * @description Usage information for this model */ - use_cache?: boolean; + usage_info: string | null; /** - * Batch Group - * @description The ID of this batch node's group. If provided, all batch nodes in with the same ID will be 'zipped' before execution, and all nodes' collections must be of the same size. - * @default None - * @enum {string} + * Base + * @default any + * @constant */ - batch_group_id?: "None" | "Group 1" | "Group 2" | "Group 3" | "Group 4" | "Group 5"; + base: "any"; /** - * Strings - * @description The strings to batch over - * @default null + * Type + * @default t5_encoder + * @constant */ - strings?: string[] | null; + type: "t5_encoder"; /** - * type - * @default string_batch + * Format + * @default t5_encoder * @constant */ - type: "string_batch"; + format: "t5_encoder"; }; - /** - * String Collection Primitive - * @description A collection of string primitive values - */ - StringCollectionInvocation: { - /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. - */ - id: string; + /** TBLR */ + TBLR: { + /** Top */ + top: number; + /** Bottom */ + bottom: number; + /** Left */ + left: number; + /** Right */ + right: number; + }; + /** TI_File_SD1_Config */ + TI_File_SD1_Config: { /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Key + * @description A unique key for this model. */ - is_intermediate?: boolean; + key: string; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Hash + * @description The hash of the model file(s). */ - use_cache?: boolean; + hash: string; /** - * Collection - * @description The collection of string values - * @default [] + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. */ - collection?: string[]; + path: string; /** - * type - * @default string_collection - * @constant + * File Size + * @description The size of the model in bytes. 
*/ - type: "string_collection"; - }; - /** - * StringCollectionOutput - * @description Base class for nodes that output a collection of strings - */ - StringCollectionOutput: { + file_size: number; /** - * Collection - * @description The output strings + * Name + * @description Name of the model. */ - collection: string[]; + name: string; /** - * type - * @default string_collection_output - * @constant + * Description + * @description Model description */ - type: "string_collection_output"; - }; - /** - * String Generator - * @description Generated a range of strings for use in a batched generation - */ - StringGenerator: { + description: string | null; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Source + * @description The original source of the model (path, URL or repo_id). */ - id: string; + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Source Api Response + * @description The original API response from the source, as stringified JSON. */ - is_intermediate?: boolean; + source_api_response: string | null; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Cover Image + * @description Url for image to preview model */ - use_cache?: boolean; + cover_image: string | null; /** - * Generator Type - * @description The string generator. + * Usage Info + * @description Usage information for this model */ - generator: components["schemas"]["StringGeneratorField"]; + usage_info: string | null; /** - * type - * @default string_generator + * Type + * @default embedding * @constant */ - type: "string_generator"; - }; - /** StringGeneratorField */ - StringGeneratorField: Record; - /** - * StringGeneratorOutput - * @description Base class for nodes that output a collection of strings - */ - StringGeneratorOutput: { + type: "embedding"; /** - * Strings - * @description The generated strings + * Format + * @default embedding_file + * @constant */ - strings: string[]; + format: "embedding_file"; /** - * type - * @default string_generator_output + * Base + * @default sd-1 * @constant */ - type: "string_generator_output"; + base: "sd-1"; }; - /** - * String Primitive - * @description A string primitive value - */ - StringInvocation: { + /** TI_File_SD2_Config */ + TI_File_SD2_Config: { /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Key + * @description A unique key for this model. */ - id: string; + key: string; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Hash + * @description The hash of the model file(s). */ - is_intermediate?: boolean; + hash: string; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. */ - use_cache?: boolean; + path: string; /** - * Value - * @description The string value - * @default + * File Size + * @description The size of the model in bytes. */ - value?: string; + file_size: number; /** - * type - * @default string - * @constant + * Name + * @description Name of the model. 
*/ - type: "string"; - }; - /** - * String Join - * @description Joins string left to string right - */ - StringJoinInvocation: { + name: string; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Description + * @description Model description */ - id: string; + description: string | null; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Source + * @description The original source of the model (path, URL or repo_id). */ - is_intermediate?: boolean; + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Source Api Response + * @description The original API response from the source, as stringified JSON. */ - use_cache?: boolean; + source_api_response: string | null; /** - * String Left - * @description String Left - * @default + * Cover Image + * @description Url for image to preview model */ - string_left?: string; + cover_image: string | null; /** - * String Right - * @description String Right - * @default + * Usage Info + * @description Usage information for this model */ - string_right?: string; + usage_info: string | null; /** - * type - * @default string_join + * Type + * @default embedding * @constant */ - type: "string_join"; - }; - /** - * String Join Three - * @description Joins string left to string middle to string right - */ - StringJoinThreeInvocation: { + type: "embedding"; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Format + * @default embedding_file + * @constant */ - id: string; + format: "embedding_file"; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Base + * @default sd-2 + * @constant */ - is_intermediate?: boolean; + base: "sd-2"; + }; + /** TI_File_SDXL_Config */ + TI_File_SDXL_Config: { /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Key + * @description A unique key for this model. */ - use_cache?: boolean; + key: string; /** - * String Left - * @description String Left - * @default + * Hash + * @description The hash of the model file(s). */ - string_left?: string; + hash: string; /** - * String Middle - * @description String Middle - * @default + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. */ - string_middle?: string; + path: string; /** - * String Right - * @description String Right - * @default + * File Size + * @description The size of the model in bytes. */ - string_right?: string; + file_size: number; /** - * type - * @default string_join_three - * @constant + * Name + * @description Name of the model. */ - type: "string_join_three"; - }; - /** - * StringOutput - * @description Base class for nodes that output a single string - */ - StringOutput: { + name: string; /** - * Value - * @description The output string + * Description + * @description Model description */ - value: string; + description: string | null; /** - * type - * @default string_output - * @constant + * Source + * @description The original source of the model (path, URL or repo_id). 
*/ - type: "string_output"; - }; - /** - * StringPosNegOutput - * @description Base class for invocations that output a positive and negative string - */ - StringPosNegOutput: { + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; /** - * Positive String - * @description Positive string + * Source Api Response + * @description The original API response from the source, as stringified JSON. */ - positive_string: string; + source_api_response: string | null; /** - * Negative String - * @description Negative string + * Cover Image + * @description Url for image to preview model */ - negative_string: string; + cover_image: string | null; /** - * type - * @default string_pos_neg_output + * Usage Info + * @description Usage information for this model + */ + usage_info: string | null; + /** + * Type + * @default embedding * @constant */ - type: "string_pos_neg_output"; - }; - /** - * String Replace - * @description Replaces the search string with the replace string - */ - StringReplaceInvocation: { + type: "embedding"; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Format + * @default embedding_file + * @constant */ - id: string; + format: "embedding_file"; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Base + * @default sdxl + * @constant */ - is_intermediate?: boolean; + base: "sdxl"; + }; + /** TI_Folder_SD1_Config */ + TI_Folder_SD1_Config: { /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Key + * @description A unique key for this model. */ - use_cache?: boolean; + key: string; /** - * String - * @description String to work on - * @default + * Hash + * @description The hash of the model file(s). */ - string?: string; + hash: string; /** - * Search String - * @description String to search for - * @default + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. */ - search_string?: string; + path: string; /** - * Replace String - * @description String to replace the search - * @default + * File Size + * @description The size of the model in bytes. */ - replace_string?: string; + file_size: number; /** - * Use Regex - * @description Use search string as a regex expression (non regex is case insensitive) - * @default false + * Name + * @description Name of the model. */ - use_regex?: boolean; + name: string; /** - * type - * @default string_replace - * @constant + * Description + * @description Model description */ - type: "string_replace"; - }; - /** - * String Split - * @description Splits string into two strings, based on the first occurance of the delimiter. The delimiter will be removed from the string - */ - StringSplitInvocation: { + description: string | null; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Source + * @description The original source of the model (path, URL or repo_id). */ - id: string; + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Source Api Response + * @description The original API response from the source, as stringified JSON. 
*/ - is_intermediate?: boolean; + source_api_response: string | null; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Cover Image + * @description Url for image to preview model */ - use_cache?: boolean; + cover_image: string | null; /** - * String - * @description String to split - * @default + * Usage Info + * @description Usage information for this model */ - string?: string; + usage_info: string | null; /** - * Delimiter - * @description Delimiter to spilt with. blank will split on the first whitespace - * @default + * Type + * @default embedding + * @constant */ - delimiter?: string; + type: "embedding"; /** - * type - * @default string_split + * Format + * @default embedding_folder * @constant */ - type: "string_split"; - }; - /** - * String Split Negative - * @description Splits string into two strings, inside [] goes into negative string everthing else goes into positive string. Each [ and ] character is replaced with a space - */ - StringSplitNegInvocation: { + format: "embedding_folder"; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Base + * @default sd-1 + * @constant */ - id: string; + base: "sd-1"; + }; + /** TI_Folder_SD2_Config */ + TI_Folder_SD2_Config: { /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Key + * @description A unique key for this model. */ - is_intermediate?: boolean; + key: string; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Hash + * @description The hash of the model file(s). */ - use_cache?: boolean; + hash: string; /** - * String - * @description String to split - * @default + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. */ - string?: string; + path: string; /** - * type - * @default string_split_neg - * @constant + * File Size + * @description The size of the model in bytes. */ - type: "string_split_neg"; - }; - /** StylePresetRecordWithImage */ - StylePresetRecordWithImage: { + file_size: number; /** * Name - * @description The name of the style preset. + * @description Name of the model. */ name: string; - /** @description The preset data */ - preset_data: components["schemas"]["PresetData"]; - /** @description The type of style preset */ - type: components["schemas"]["PresetType"]; /** - * Id - * @description The style preset ID. + * Description + * @description Model description */ - id: string; + description: string | null; /** - * Image - * @description The path for image + * Source + * @description The original source of the model (path, URL or repo_id). */ - image: string | null; - }; - /** - * SubModelType - * @description Submodel type. 
- * @enum {string} - */ - SubModelType: "unet" | "transformer" | "text_encoder" | "text_encoder_2" | "text_encoder_3" | "tokenizer" | "tokenizer_2" | "tokenizer_3" | "vae" | "vae_decoder" | "vae_encoder" | "scheduler" | "safety_checker"; - /** SubmodelDefinition */ - SubmodelDefinition: { - /** Path Or Prefix */ - path_or_prefix: string; - model_type: components["schemas"]["ModelType"]; - /** Variant */ - variant?: components["schemas"]["ModelVariantType"] | components["schemas"]["ClipVariantType"] | null; - }; - /** - * Subtract Integers - * @description Subtracts two numbers - */ - SubtractInvocation: { + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. + * Source Api Response + * @description The original API response from the source, as stringified JSON. */ - id: string; + source_api_response: string | null; /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false + * Cover Image + * @description Url for image to preview model */ - is_intermediate?: boolean; + cover_image: string | null; /** - * Use Cache - * @description Whether or not to use the cache - * @default true + * Usage Info + * @description Usage information for this model */ - use_cache?: boolean; + usage_info: string | null; /** - * A - * @description The first number - * @default 0 + * Type + * @default embedding + * @constant */ - a?: number; + type: "embedding"; /** - * B - * @description The second number - * @default 0 + * Format + * @default embedding_folder + * @constant */ - b?: number; + format: "embedding_folder"; /** - * type - * @default sub + * Base + * @default sd-2 * @constant */ - type: "sub"; + base: "sd-2"; }; - /** - * T2IAdapterConfig - * @description Model config for T2I. - */ - T2IAdapterConfig: { + /** TI_Folder_SDXL_Config */ + TI_Folder_SDXL_Config: { /** * Key * @description A unique key for this model. @@ -21008,25 +24098,16 @@ export type components = { * @description The size of the model in bytes. */ file_size: number; - /** - * Name - * @description Name of the model. - */ - name: string; - /** - * Type - * @default t2i_adapter - * @constant + /** + * Name + * @description Name of the model. */ - type: "t2i_adapter"; + name: string; /** - * Format - * @default diffusers - * @constant + * Description + * @description Model description */ - format: "diffusers"; - /** @description The base model. */ - base: components["schemas"]["BaseModelType"]; + description: string | null; /** * Source * @description The original source of the model (path, URL or repo_id). @@ -21034,75 +24115,63 @@ export type components = { source: string; /** @description The type of source */ source_type: components["schemas"]["ModelSourceType"]; - /** - * Description - * @description Model description - */ - description?: string | null; /** * Source Api Response * @description The original API response from the source, as stringified JSON. 
*/ - source_api_response?: string | null; + source_api_response: string | null; /** * Cover Image * @description Url for image to preview model */ - cover_image?: string | null; - /** - * Submodels - * @description Loadable submodels in this model - */ - submodels?: { - [key: string]: components["schemas"]["SubmodelDefinition"]; - } | null; + cover_image: string | null; /** * Usage Info * @description Usage information for this model */ - usage_info?: string | null; - /** @description Default settings for this model */ - default_settings?: components["schemas"]["ControlAdapterDefaultSettings"] | null; - /** @default */ - repo_variant?: components["schemas"]["ModelRepoVariant"] | null; - }; - /** T2IAdapterField */ - T2IAdapterField: { - /** @description The T2I-Adapter image prompt. */ - image: components["schemas"]["ImageField"]; - /** @description The T2I-Adapter model to use. */ - t2i_adapter_model: components["schemas"]["ModelIdentifierField"]; + usage_info: string | null; /** - * Weight - * @description The weight given to the T2I-Adapter - * @default 1 + * Type + * @default embedding + * @constant */ - weight?: number | number[]; + type: "embedding"; /** - * Begin Step Percent - * @description When the T2I-Adapter is first applied (% of total steps) - * @default 0 + * Format + * @default embedding_folder + * @constant */ - begin_step_percent?: number; + format: "embedding_folder"; /** - * End Step Percent - * @description When the T2I-Adapter is last applied (% of total steps) - * @default 1 + * Base + * @default sdxl + * @constant */ - end_step_percent?: number; + base: "sdxl"; + }; + /** + * TensorField + * @description A tensor primitive field. + */ + TensorField: { /** - * Resize Mode - * @description The resize mode to use - * @default just_resize - * @enum {string} + * Tensor Name + * @description The name of a tensor. */ - resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple"; + tensor_name: string; + }; + /** Tile */ + Tile: { + /** @description The coordinates of this tile relative to its parent image. */ + coords: components["schemas"]["TBLR"]; + /** @description The amount of overlap with adjacent tiles on each side of this tile. */ + overlap: components["schemas"]["TBLR"]; }; /** - * T2I-Adapter - SD1.5, SDXL - * @description Collects T2I-Adapter info to pass to other nodes. + * Tile to Properties + * @description Split a Tile into its individual properties. */ - T2IAdapterInvocation: { + TileToPropertiesInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -21121,369 +24190,363 @@ export type components = { */ use_cache?: boolean; /** - * @description The IP-Adapter image prompt. - * @default null - */ - image?: components["schemas"]["ImageField"] | null; - /** - * T2I-Adapter Model - * @description The T2I-Adapter model. + * @description The tile to split into properties. 
* @default null */ - t2i_adapter_model?: components["schemas"]["ModelIdentifierField"] | null; - /** - * Weight - * @description The weight given to the T2I-Adapter - * @default 1 - */ - weight?: number | number[]; - /** - * Begin Step Percent - * @description When the T2I-Adapter is first applied (% of total steps) - * @default 0 - */ - begin_step_percent?: number; - /** - * End Step Percent - * @description When the T2I-Adapter is last applied (% of total steps) - * @default 1 - */ - end_step_percent?: number; - /** - * Resize Mode - * @description The resize mode applied to the T2I-Adapter input image so that it matches the target output size. - * @default just_resize - * @enum {string} - */ - resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple"; + tile?: components["schemas"]["Tile"] | null; /** * type - * @default t2i_adapter + * @default tile_to_properties * @constant */ - type: "t2i_adapter"; + type: "tile_to_properties"; }; - /** T2IAdapterMetadataField */ - T2IAdapterMetadataField: { - /** @description The control image. */ - image: components["schemas"]["ImageField"]; - /** - * @description The control image, after processing. - * @default null - */ - processed_image?: components["schemas"]["ImageField"] | null; - /** @description The T2I-Adapter model to use. */ - t2i_adapter_model: components["schemas"]["ModelIdentifierField"]; - /** - * Weight - * @description The weight given to the T2I-Adapter - * @default 1 - */ - weight?: number | number[]; - /** - * Begin Step Percent - * @description When the T2I-Adapter is first applied (% of total steps) - * @default 0 - */ - begin_step_percent?: number; + /** TileToPropertiesOutput */ + TileToPropertiesOutput: { /** - * End Step Percent - * @description When the T2I-Adapter is last applied (% of total steps) - * @default 1 + * Coords Left + * @description Left coordinate of the tile relative to its parent image. */ - end_step_percent?: number; + coords_left: number; /** - * Resize Mode - * @description The resize mode to use - * @default just_resize - * @enum {string} + * Coords Right + * @description Right coordinate of the tile relative to its parent image. */ - resize_mode?: "just_resize" | "crop_resize" | "fill_resize" | "just_resize_simple"; - }; - /** T2IAdapterOutput */ - T2IAdapterOutput: { + coords_right: number; /** - * T2I Adapter - * @description T2I-Adapter(s) to apply + * Coords Top + * @description Top coordinate of the tile relative to its parent image. */ - t2i_adapter: components["schemas"]["T2IAdapterField"]; + coords_top: number; /** - * type - * @default t2i_adapter_output - * @constant + * Coords Bottom + * @description Bottom coordinate of the tile relative to its parent image. */ - type: "t2i_adapter_output"; - }; - /** T5EncoderBnbQuantizedLlmInt8bConfig */ - T5EncoderBnbQuantizedLlmInt8bConfig: { + coords_bottom: number; /** - * Key - * @description A unique key for this model. + * Width + * @description The width of the tile. Equal to coords_right - coords_left. */ - key: string; + width: number; /** - * Hash - * @description The hash of the model file(s). + * Height + * @description The height of the tile. Equal to coords_bottom - coords_top. */ - hash: string; + height: number; /** - * Path - * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. + * Overlap Top + * @description Overlap between this tile and its top neighbor. 
*/ - path: string; + overlap_top: number; /** - * File Size - * @description The size of the model in bytes. + * Overlap Bottom + * @description Overlap between this tile and its bottom neighbor. */ - file_size: number; + overlap_bottom: number; /** - * Name - * @description Name of the model. + * Overlap Left + * @description Overlap between this tile and its left neighbor. */ - name: string; + overlap_left: number; /** - * Type - * @default t5_encoder - * @constant + * Overlap Right + * @description Overlap between this tile and its right neighbor. */ - type: "t5_encoder"; + overlap_right: number; /** - * Format - * @default bnb_quantized_int8b + * type + * @default tile_to_properties_output * @constant */ - format: "bnb_quantized_int8b"; - /** @description The base model. */ - base: components["schemas"]["BaseModelType"]; + type: "tile_to_properties_output"; + }; + /** TileWithImage */ + TileWithImage: { + tile: components["schemas"]["Tile"]; + image: components["schemas"]["ImageField"]; + }; + /** + * Tiled Multi-Diffusion Denoise - SD1.5, SDXL + * @description Tiled Multi-Diffusion denoising. + * + * This node handles automatically tiling the input image, and is primarily intended for global refinement of images + * in tiled upscaling workflows. Future Multi-Diffusion nodes should allow the user to specify custom regions with + * different parameters for each region to harness the full power of Multi-Diffusion. + * + * This node has a similar interface to the `DenoiseLatents` node, but it has a reduced feature set (no IP-Adapter, + * T2I-Adapter, masking, etc.). + */ + TiledMultiDiffusionDenoiseLatents: { /** - * Source - * @description The original source of the model (path, URL or repo_id). + * Id + * @description The id of this instance of an invocation. Must be unique among all instances of invocations. */ - source: string; - /** @description The type of source */ - source_type: components["schemas"]["ModelSourceType"]; + id: string; /** - * Description - * @description Model description + * Is Intermediate + * @description Whether or not this is an intermediate invocation. + * @default false */ - description?: string | null; + is_intermediate?: boolean; /** - * Source Api Response - * @description The original API response from the source, as stringified JSON. + * Use Cache + * @description Whether or not to use the cache + * @default true */ - source_api_response?: string | null; + use_cache?: boolean; /** - * Cover Image - * @description Url for image to preview model + * @description Positive conditioning tensor + * @default null */ - cover_image?: string | null; + positive_conditioning?: components["schemas"]["ConditioningField"] | null; /** - * Submodels - * @description Loadable submodels in this model + * @description Negative conditioning tensor + * @default null */ - submodels?: { - [key: string]: components["schemas"]["SubmodelDefinition"]; - } | null; + negative_conditioning?: components["schemas"]["ConditioningField"] | null; /** - * Usage Info - * @description Usage information for this model + * @description Noise tensor + * @default null */ - usage_info?: string | null; - }; - /** T5EncoderConfig */ - T5EncoderConfig: { + noise?: components["schemas"]["LatentsField"] | null; /** - * Key - * @description A unique key for this model. + * @description Latents tensor + * @default null */ - key: string; + latents?: components["schemas"]["LatentsField"] | null; /** - * Hash - * @description The hash of the model file(s). 
+ * Tile Height + * @description Height of the tiles in image space. + * @default 1024 */ - hash: string; + tile_height?: number; /** - * Path - * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. + * Tile Width + * @description Width of the tiles in image space. + * @default 1024 */ - path: string; + tile_width?: number; /** - * File Size - * @description The size of the model in bytes. + * Tile Overlap + * @description The overlap between adjacent tiles in pixel space. (Of course, tile merging is applied in latent space.) Tiles will be cropped during merging (if necessary) to ensure that they overlap by exactly this amount. + * @default 32 */ - file_size: number; + tile_overlap?: number; /** - * Name - * @description Name of the model. + * Steps + * @description Number of steps to run + * @default 18 */ - name: string; + steps?: number; /** - * Type - * @default t5_encoder - * @constant + * CFG Scale + * @description Classifier-Free Guidance scale + * @default 6 */ - type: "t5_encoder"; + cfg_scale?: number | number[]; /** - * Format - * @default t5_encoder - * @constant + * Denoising Start + * @description When to start denoising, expressed as a percentage of total steps + * @default 0 */ - format: "t5_encoder"; - /** @description The base model. */ - base: components["schemas"]["BaseModelType"]; + denoising_start?: number; /** - * Source - * @description The original source of the model (path, URL or repo_id). + * Denoising End + * @description When to stop denoising, expressed as a percentage of total steps + * @default 1 */ - source: string; - /** @description The type of source */ - source_type: components["schemas"]["ModelSourceType"]; + denoising_end?: number; /** - * Description - * @description Model description + * Scheduler + * @description Scheduler to use during inference + * @default euler + * @enum {string} */ - description?: string | null; + scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_3m" | "dpmpp_3m_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd"; /** - * Source Api Response - * @description The original API response from the source, as stringified JSON.
+ * UNet + * @description UNet (scheduler, LoRAs) + * @default null */ - source_api_response?: string | null; + unet?: components["schemas"]["UNetField"] | null; /** - * Cover Image - * @description Url for image to preview model + * CFG Rescale Multiplier + * @description Rescale multiplier for CFG guidance, used for models trained with zero-terminal SNR + * @default 0 */ - cover_image?: string | null; + cfg_rescale_multiplier?: number; /** - * Submodels - * @description Loadable submodels in this model + * Control + * @default null */ - submodels?: { - [key: string]: components["schemas"]["SubmodelDefinition"]; - } | null; + control?: components["schemas"]["ControlField"] | components["schemas"]["ControlField"][] | null; /** - * Usage Info - * @description Usage information for this model + * type + * @default tiled_multi_diffusion_denoise_latents + * @constant */ - usage_info?: string | null; + type: "tiled_multi_diffusion_denoise_latents"; }; - /** T5EncoderField */ - T5EncoderField: { - /** @description Info to load tokenizer submodel */ - tokenizer: components["schemas"]["ModelIdentifierField"]; - /** @description Info to load text_encoder submodel */ - text_encoder: components["schemas"]["ModelIdentifierField"]; + /** TransformerField */ + TransformerField: { + /** @description Info to load Transformer submodel */ + transformer: components["schemas"]["ModelIdentifierField"]; /** * Loras * @description LoRAs to apply on model loading */ loras: components["schemas"]["LoRAField"][]; }; - /** TBLR */ - TBLR: { - /** Top */ - top: number; - /** Bottom */ - bottom: number; - /** Left */ - left: number; - /** Right */ - right: number; - }; /** - * TensorField - * @description A tensor primitive field. + * UIComponent + * @description The type of UI component to use for a field, used to override the default components, which are + * inferred from the field type. + * @enum {string} */ - TensorField: { - /** - * Tensor Name - * @description The name of a tensor. - */ - tensor_name: string; - }; + UIComponent: "none" | "textarea" | "slider"; /** - * TextualInversionFileConfig - * @description Model config for textual inversion embeddings. + * UIConfigBase + * @description Provides additional node configuration to the UI. + * This is used internally by the @invocation decorator logic. Do not use this directly. */ - TextualInversionFileConfig: { + UIConfigBase: { /** - * Key - * @description A unique key for this model. + * Tags + * @description The node's tags + * @default null */ - key: string; + tags: string[] | null; /** - * Hash - * @description The hash of the model file(s). + * Title + * @description The node's display name + * @default null */ - hash: string; + title: string | null; /** - * Path - * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. + * Category + * @description The node's category + * @default null */ - path: string; + category: string | null; /** - * File Size - * @description The size of the model in bytes. + * Version + * @description The node's version. Should be a valid semver string e.g. "1.0.0" or "3.8.13". */ - file_size: number; + version: string; /** - * Name - * @description Name of the model. 
+ * Node Pack + * @description The node pack that this node belongs to, will be 'invokeai' for built-in nodes */ - name: string; + node_pack: string; /** - * Type - * @default embedding - * @constant + * @description The node's classification + * @default stable */ - type: "embedding"; + classification: components["schemas"]["Classification"]; + }; + /** + * UIType + * @description Type hints for the UI for situations in which the field type is not enough to infer the correct UI type. + * + * - Model Fields + * The most common node-author-facing use will be for model fields. Internally, there is no difference + * between SD-1, SD-2 and SDXL model fields - they all use the class `MainModelField`. To ensure the + * base-model-specific UI is rendered, use e.g. `ui_type=UIType.SDXLMainModelField` to indicate that + * the field is an SDXL main model field. + * + * - Any Field + * We cannot infer the usage of `typing.Any` via schema parsing, so you *must* use `ui_type=UIType.Any` to + * indicate that the field accepts any type. Use with caution. This cannot be used on outputs. + * + * - Scheduler Field + * Special handling in the UI is needed for this field, which otherwise would be parsed as a plain enum field. + * + * - Internal Fields + * Similar to the Any Field, the `collect` and `iterate` nodes make use of `typing.Any`. To facilitate + * handling these types in the client, we use `UIType._Collection` and `UIType._CollectionItem`. These + * should not be used by node authors. + * + * - DEPRECATED Fields + * These types are deprecated and should not be used by node authors. A warning will be logged if one is + * used, and the type will be ignored. They are included here for backwards compatibility. + * @enum {string} + */ + UIType: "SchedulerField" | "AnyField" | "CollectionField" | "CollectionItemField" | "DEPRECATED_Boolean" | "DEPRECATED_Color" | "DEPRECATED_Conditioning" | "DEPRECATED_Control" | "DEPRECATED_Float" | "DEPRECATED_Image" | "DEPRECATED_Integer" | "DEPRECATED_Latents" | "DEPRECATED_String" | "DEPRECATED_BooleanCollection" | "DEPRECATED_ColorCollection" | "DEPRECATED_ConditioningCollection" | "DEPRECATED_ControlCollection" | "DEPRECATED_FloatCollection" | "DEPRECATED_ImageCollection" | "DEPRECATED_IntegerCollection" | "DEPRECATED_LatentsCollection" | "DEPRECATED_StringCollection" | "DEPRECATED_BooleanPolymorphic" | "DEPRECATED_ColorPolymorphic" | "DEPRECATED_ConditioningPolymorphic" | "DEPRECATED_ControlPolymorphic" | "DEPRECATED_FloatPolymorphic" | "DEPRECATED_ImagePolymorphic" | "DEPRECATED_IntegerPolymorphic" | "DEPRECATED_LatentsPolymorphic" | "DEPRECATED_StringPolymorphic" | "DEPRECATED_UNet" | "DEPRECATED_Vae" | "DEPRECATED_CLIP" | "DEPRECATED_Collection" | "DEPRECATED_CollectionItem" | "DEPRECATED_Enum" | "DEPRECATED_WorkflowField" | "DEPRECATED_IsIntermediate" | "DEPRECATED_BoardField" | "DEPRECATED_MetadataItem" | "DEPRECATED_MetadataItemCollection" | "DEPRECATED_MetadataItemPolymorphic" | "DEPRECATED_MetadataDict" | "DEPRECATED_MainModelField" | "DEPRECATED_CogView4MainModelField" | "DEPRECATED_FluxMainModelField" | "DEPRECATED_SD3MainModelField" | "DEPRECATED_SDXLMainModelField" | "DEPRECATED_SDXLRefinerModelField" | "DEPRECATED_ONNXModelField" | "DEPRECATED_VAEModelField" | "DEPRECATED_FluxVAEModelField" | "DEPRECATED_LoRAModelField" | "DEPRECATED_ControlNetModelField" | "DEPRECATED_IPAdapterModelField" | "DEPRECATED_T2IAdapterModelField" | "DEPRECATED_T5EncoderModelField" | "DEPRECATED_CLIPEmbedModelField" | "DEPRECATED_CLIPLEmbedModelField" | 
"DEPRECATED_CLIPGEmbedModelField" | "DEPRECATED_SpandrelImageToImageModelField" | "DEPRECATED_ControlLoRAModelField" | "DEPRECATED_SigLipModelField" | "DEPRECATED_FluxReduxModelField" | "DEPRECATED_LLaVAModelField" | "DEPRECATED_Imagen3ModelField" | "DEPRECATED_Imagen4ModelField" | "DEPRECATED_ChatGPT4oModelField" | "DEPRECATED_Gemini2_5ModelField" | "DEPRECATED_FluxKontextModelField" | "DEPRECATED_Veo3ModelField" | "DEPRECATED_RunwayModelField"; + /** UNetField */ + UNetField: { + /** @description Info to load unet submodel */ + unet: components["schemas"]["ModelIdentifierField"]; + /** @description Info to load scheduler submodel */ + scheduler: components["schemas"]["ModelIdentifierField"]; /** - * Format - * @default embedding_file - * @constant + * Loras + * @description LoRAs to apply on model loading */ - format: "embedding_file"; - /** @description The base model. */ - base: components["schemas"]["BaseModelType"]; + loras: components["schemas"]["LoRAField"][]; + /** + * Seamless Axes + * @description Axes("x" and "y") to which apply seamless + */ + seamless_axes?: string[]; + /** + * @description FreeU configuration + * @default null + */ + freeu_config?: components["schemas"]["FreeUConfig"] | null; + }; + /** + * UNetOutput + * @description Base class for invocations that output a UNet field. + */ + UNetOutput: { /** - * Source - * @description The original source of the model (path, URL or repo_id). + * UNet + * @description UNet (scheduler, LoRAs) */ - source: string; - /** @description The type of source */ - source_type: components["schemas"]["ModelSourceType"]; + unet: components["schemas"]["UNetField"]; /** - * Description - * @description Model description + * type + * @default unet_output + * @constant */ - description?: string | null; + type: "unet_output"; + }; + /** + * URLModelSource + * @description A generic URL point to a checkpoint file. + */ + URLModelSource: { /** - * Source Api Response - * @description The original API response from the source, as stringified JSON. + * Url + * Format: uri */ - source_api_response?: string | null; + url: string; + /** Access Token */ + access_token?: string | null; /** - * Cover Image - * @description Url for image to preview model + * @description discriminator enum property added by openapi-typescript + * @enum {string} */ - cover_image?: string | null; + type: "url"; + }; + /** URLRegexTokenPair */ + URLRegexTokenPair: { /** - * Submodels - * @description Loadable submodels in this model + * Url Regex + * @description Regular expression to match against the URL */ - submodels?: { - [key: string]: components["schemas"]["SubmodelDefinition"]; - } | null; + url_regex: string; /** - * Usage Info - * @description Usage information for this model + * Token + * @description Token to use when the URL matches the regex */ - usage_info?: string | null; + token: string; }; /** - * TextualInversionFolderConfig - * @description Model config for textual inversion embeddings. + * Unknown_Config + * @description Model config for unknown models, used as a fallback when we cannot identify a model. */ - TextualInversionFolderConfig: { + Unknown_Config: { /** * Key * @description A unique key for this model. @@ -21510,19 +24573,10 @@ export type components = { */ name: string; /** - * Type - * @default embedding - * @constant - */ - type: "embedding"; - /** - * Format - * @default embedding_folder - * @constant + * Description + * @description Model description */ - format: "embedding_folder"; - /** @description The base model. 
*/ - base: components["schemas"]["BaseModelType"]; + description: string | null; /** * Source * @description The original source of the model (path, URL or repo_id). @@ -21530,46 +24584,55 @@ export type components = { source: string; /** @description The type of source */ source_type: components["schemas"]["ModelSourceType"]; - /** - * Description - * @description Model description - */ - description?: string | null; /** * Source Api Response * @description The original API response from the source, as stringified JSON. */ - source_api_response?: string | null; + source_api_response: string | null; /** * Cover Image * @description Url for image to preview model */ - cover_image?: string | null; - /** - * Submodels - * @description Loadable submodels in this model - */ - submodels?: { - [key: string]: components["schemas"]["SubmodelDefinition"]; - } | null; + cover_image: string | null; /** * Usage Info * @description Usage information for this model */ - usage_info?: string | null; - }; - /** Tile */ - Tile: { - /** @description The coordinates of this tile relative to its parent image. */ - coords: components["schemas"]["TBLR"]; - /** @description The amount of overlap with adjacent tiles on each side of this tile. */ - overlap: components["schemas"]["TBLR"]; + usage_info: string | null; + /** + * Base + * @default unknown + * @constant + */ + base: "unknown"; + /** + * Type + * @default unknown + * @constant + */ + type: "unknown"; + /** + * Format + * @default unknown + * @constant + */ + format: "unknown"; }; /** - * Tile to Properties - * @description Split a Tile into its individual properties. + * Unsharp Mask + * @description Applies an unsharp mask filter to an image */ - TileToPropertiesInvocation: { + UnsharpMaskInvocation: { + /** + * @description The board to save the image to + * @default null + */ + board?: components["schemas"]["BoardField"] | null; + /** + * @description Optional metadata to be saved with the image + * @default null + */ + metadata?: components["schemas"]["MetadataField"] | null; /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations. @@ -21588,93 +24651,83 @@ export type components = { */ use_cache?: boolean; /** - * @description The tile to split into properties. + * @description The image to use * @default null */ - tile?: components["schemas"]["Tile"] | null; - /** - * type - * @default tile_to_properties - * @constant - */ - type: "tile_to_properties"; - }; - /** TileToPropertiesOutput */ - TileToPropertiesOutput: { - /** - * Coords Left - * @description Left coordinate of the tile relative to its parent image. - */ - coords_left: number; + image?: components["schemas"]["ImageField"] | null; /** - * Coords Right - * @description Right coordinate of the tile relative to its parent image. + * Radius + * @description Unsharp mask radius + * @default 2 */ - coords_right: number; + radius?: number; /** - * Coords Top - * @description Top coordinate of the tile relative to its parent image. + * Strength + * @description Unsharp mask strength + * @default 50 */ - coords_top: number; + strength?: number; /** - * Coords Bottom - * @description Bottom coordinate of the tile relative to its parent image. + * type + * @default unsharp_mask + * @constant */ - coords_bottom: number; + type: "unsharp_mask"; + }; + /** UnstarredImagesResult */ + UnstarredImagesResult: { /** - * Width - * @description The width of the tile. Equal to coords_right - coords_left. 
+ * Affected Boards + * @description The ids of boards affected by the unstar operation */ - width: number; + affected_boards: string[]; /** - * Height - * @description The height of the tile. Equal to coords_bottom - coords_top. + * Unstarred Images + * @description The names of the images that were unstarred */ - height: number; + unstarred_images: string[]; + }; + /** UnstarredVideosResult */ + UnstarredVideosResult: { /** - * Overlap Top - * @description Overlap between this tile and its top neighbor. + * Affected Boards + * @description The ids of boards affected by the unstar operation */ - overlap_top: number; + affected_boards: string[]; /** - * Overlap Bottom - * @description Overlap between this tile and its bottom neighbor. + * Unstarred Videos + * @description The ids of the videos that were unstarred */ - overlap_bottom: number; + unstarred_videos: string[]; + }; + /** Upscaler */ + Upscaler: { /** - * Overlap Left - * @description Overlap between this tile and its left neighbor. + * Upscaling Method + * @description Name of upscaling method */ - overlap_left: number; + upscaling_method: string; /** - * Overlap Right - * @description Overlap between this tile and its right neighbor. + * Upscaling Models + * @description List of upscaling models for this method */ - overlap_right: number; + upscaling_models: string[]; + }; + /** VAEField */ + VAEField: { + /** @description Info to load vae submodel */ + vae: components["schemas"]["ModelIdentifierField"]; /** - * type - * @default tile_to_properties_output - * @constant + * Seamless Axes + * @description Axes("x" and "y") to which apply seamless */ - type: "tile_to_properties_output"; - }; - /** TileWithImage */ - TileWithImage: { - tile: components["schemas"]["Tile"]; - image: components["schemas"]["ImageField"]; + seamless_axes?: string[]; }; /** - * Tiled Multi-Diffusion Denoise - SD1.5, SDXL - * @description Tiled Multi-Diffusion denoising. - * - * This node handles automatically tiling the input image, and is primarily intended for global refinement of images - * in tiled upscaling workflows. Future Multi-Diffusion nodes should allow the user to specify custom regions with - * different parameters for each region to harness the full power of Multi-Diffusion. - * - * This node has a similar interface to the `DenoiseLatents` node, but it has a reduced feature set (no IP-Adapter, - * T2I-Adapter, masking, etc.). + * VAE Model - SD1.5, SD2, SDXL, SD3, FLUX + * @description Loads a VAE model, outputting a VaeLoaderOutput */ - TiledMultiDiffusionDenoiseLatents: { + VAELoaderInvocation: { /** * Id * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
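Note: the regenerated schema replaces the former catch-all config types (`T2IAdapterConfig`, `T5EncoderConfig`, `TextualInversionFileConfig`, `TextualInversionFolderConfig`, `UnknownModelConfig`) with per-base variants (`T2IAdapter_Diffusers_SDXL_Config`, `TI_File_SD1_Config`, `VAE_Checkpoint_FLUX_Config`, ...) whose `type`, `format`, and `base` properties are literal constants rather than shared enums. A minimal sketch of what this enables on the client side, assuming the generated file is imported as `./schema`; the union alias and type guard below are illustrative, not part of the generated output:

// Illustrative sketch only; "./schema" stands in for this generated file.
import type { components } from "./schema";

type S = components["schemas"];

// Hypothetical union of the new per-base embedding configs from this diff.
type EmbeddingConfig =
  | S["TI_File_SD1_Config"]
  | S["TI_File_SD2_Config"]
  | S["TI_File_SDXL_Config"]
  | S["TI_Folder_SD1_Config"]
  | S["TI_Folder_SD2_Config"]
  | S["TI_Folder_SDXL_Config"];

// `base` is a literal constant on each variant, so a plain equality check
// narrows the union without any runtime metadata.
function isSDXLEmbedding(
  config: EmbeddingConfig,
): config is S["TI_File_SDXL_Config"] | S["TI_Folder_SDXL_Config"] {
  return config.base === "sdxl";
}

The same narrowing works for the `VAE_Checkpoint_*` and `T2IAdapter_Diffusers_*` families added elsewhere in this diff.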
@@ -21693,255 +24746,193 @@ export type components = { */ use_cache?: boolean; /** - * @description Positive conditioning tensor + * VAE + * @description VAE model to load * @default null */ - positive_conditioning?: components["schemas"]["ConditioningField"] | null; + vae_model?: components["schemas"]["ModelIdentifierField"] | null; /** - * @description Negative conditioning tensor - * @default null + * type + * @default vae_loader + * @constant */ - negative_conditioning?: components["schemas"]["ConditioningField"] | null; + type: "vae_loader"; + }; + /** + * VAEOutput + * @description Base class for invocations that output a VAE field + */ + VAEOutput: { /** - * @description Noise tensor - * @default null + * VAE + * @description VAE */ - noise?: components["schemas"]["LatentsField"] | null; + vae: components["schemas"]["VAEField"]; /** - * @description Latents tensor - * @default null + * type + * @default vae_output + * @constant */ - latents?: components["schemas"]["LatentsField"] | null; + type: "vae_output"; + }; + /** VAE_Checkpoint_FLUX_Config */ + VAE_Checkpoint_FLUX_Config: { /** - * Tile Height - * @description Height of the tiles in image space. - * @default 1024 + * Key + * @description A unique key for this model. */ - tile_height?: number; + key: string; /** - * Tile Width - * @description Width of the tiles in image space. - * @default 1024 + * Hash + * @description The hash of the model file(s). */ - tile_width?: number; + hash: string; /** - * Tile Overlap - * @description The overlap between adjacent tiles in pixel space. (Of course, tile merging is applied in latent space.) Tiles will be cropped during merging (if necessary) to ensure that they overlap by exactly this amount. - * @default 32 + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. */ - tile_overlap?: number; + path: string; /** - * Steps - * @description Number of steps to run - * @default 18 + * File Size + * @description The size of the model in bytes. */ - steps?: number; + file_size: number; /** - * CFG Scale - * @description Classifier-Free Guidance scale - * @default 6 + * Name + * @description Name of the model. */ - cfg_scale?: number | number[]; + name: string; /** - * Denoising Start - * @description When to start denoising, expressed a percentage of total steps - * @default 0 + * Description + * @description Model description */ - denoising_start?: number; + description: string | null; /** - * Denoising End - * @description When to stop denoising, expressed a percentage of total steps - * @default 1 + * Source + * @description The original source of the model (path, URL or repo_id). */ - denoising_end?: number; + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; /** - * Scheduler - * @description Scheduler to use during inference - * @default euler - * @enum {string} + * Source Api Response + * @description The original API response from the source, as stringified JSON. 
*/ - scheduler?: "ddim" | "ddpm" | "deis" | "deis_k" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_k" | "kdpm_2_a" | "kdpm_2_a_k" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_3m" | "dpmpp_3m_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc" | "unipc_k" | "lcm" | "tcd"; + source_api_response: string | null; /** - * UNet - * @description UNet (scheduler, LoRAs) - * @default null + * Cover Image + * @description Url for image to preview model */ - unet?: components["schemas"]["UNetField"] | null; + cover_image: string | null; /** - * CFG Rescale Multiplier - * @description Rescale multiplier for CFG guidance, used for models trained with zero-terminal SNR - * @default 0 + * Usage Info + * @description Usage information for this model */ - cfg_rescale_multiplier?: number; + usage_info: string | null; /** - * Control - * @default null + * Config Path + * @description Path to the config for this model, if any. */ - control?: components["schemas"]["ControlField"] | components["schemas"]["ControlField"][] | null; + config_path: string | null; /** - * type - * @default tiled_multi_diffusion_denoise_latents + * Type + * @default vae * @constant */ - type: "tiled_multi_diffusion_denoise_latents"; - }; - /** TransformerField */ - TransformerField: { - /** @description Info to load Transformer submodel */ - transformer: components["schemas"]["ModelIdentifierField"]; + type: "vae"; /** - * Loras - * @description LoRAs to apply on model loading + * Format + * @default checkpoint + * @constant */ - loras: components["schemas"]["LoRAField"][]; - }; - /** - * UIComponent - * @description The type of UI component to use for a field, used to override the default components, which are - * inferred from the field type. - * @enum {string} - */ - UIComponent: "none" | "textarea" | "slider"; - /** - * UIConfigBase - * @description Provides additional node configuration to the UI. - * This is used internally by the @invocation decorator logic. Do not use this directly. - */ - UIConfigBase: { + format: "checkpoint"; /** - * Tags - * @description The node's tags - * @default null + * Base + * @default flux + * @constant */ - tags: string[] | null; + base: "flux"; + }; + /** VAE_Checkpoint_SD1_Config */ + VAE_Checkpoint_SD1_Config: { /** - * Title - * @description The node's display name - * @default null + * Key + * @description A unique key for this model. */ - title: string | null; + key: string; /** - * Category - * @description The node's category - * @default null + * Hash + * @description The hash of the model file(s). */ - category: string | null; + hash: string; /** - * Version - * @description The node's version. Should be a valid semver string e.g. "1.0.0" or "3.8.13". + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. */ - version: string; + path: string; /** - * Node Pack - * @description The node pack that this node belongs to, will be 'invokeai' for built-in nodes + * File Size + * @description The size of the model in bytes. */ - node_pack: string; + file_size: number; /** - * @description The node's classification - * @default stable + * Name + * @description Name of the model. */ - classification: components["schemas"]["Classification"]; - }; - /** - * UIType - * @description Type hints for the UI for situations in which the field type is not enough to infer the correct UI type. 
- * - * - Model Fields - * The most common node-author-facing use will be for model fields. Internally, there is no difference - * between SD-1, SD-2 and SDXL model fields - they all use the class `MainModelField`. To ensure the - * base-model-specific UI is rendered, use e.g. `ui_type=UIType.SDXLMainModelField` to indicate that - * the field is an SDXL main model field. - * - * - Any Field - * We cannot infer the usage of `typing.Any` via schema parsing, so you *must* use `ui_type=UIType.Any` to - * indicate that the field accepts any type. Use with caution. This cannot be used on outputs. - * - * - Scheduler Field - * Special handling in the UI is needed for this field, which otherwise would be parsed as a plain enum field. - * - * - Internal Fields - * Similar to the Any Field, the `collect` and `iterate` nodes make use of `typing.Any`. To facilitate - * handling these types in the client, we use `UIType._Collection` and `UIType._CollectionItem`. These - * should not be used by node authors. - * - * - DEPRECATED Fields - * These types are deprecated and should not be used by node authors. A warning will be logged if one is - * used, and the type will be ignored. They are included here for backwards compatibility. - * @enum {string} - */ - UIType: "SchedulerField" | "AnyField" | "CollectionField" | "CollectionItemField" | "DEPRECATED_Boolean" | "DEPRECATED_Color" | "DEPRECATED_Conditioning" | "DEPRECATED_Control" | "DEPRECATED_Float" | "DEPRECATED_Image" | "DEPRECATED_Integer" | "DEPRECATED_Latents" | "DEPRECATED_String" | "DEPRECATED_BooleanCollection" | "DEPRECATED_ColorCollection" | "DEPRECATED_ConditioningCollection" | "DEPRECATED_ControlCollection" | "DEPRECATED_FloatCollection" | "DEPRECATED_ImageCollection" | "DEPRECATED_IntegerCollection" | "DEPRECATED_LatentsCollection" | "DEPRECATED_StringCollection" | "DEPRECATED_BooleanPolymorphic" | "DEPRECATED_ColorPolymorphic" | "DEPRECATED_ConditioningPolymorphic" | "DEPRECATED_ControlPolymorphic" | "DEPRECATED_FloatPolymorphic" | "DEPRECATED_ImagePolymorphic" | "DEPRECATED_IntegerPolymorphic" | "DEPRECATED_LatentsPolymorphic" | "DEPRECATED_StringPolymorphic" | "DEPRECATED_UNet" | "DEPRECATED_Vae" | "DEPRECATED_CLIP" | "DEPRECATED_Collection" | "DEPRECATED_CollectionItem" | "DEPRECATED_Enum" | "DEPRECATED_WorkflowField" | "DEPRECATED_IsIntermediate" | "DEPRECATED_BoardField" | "DEPRECATED_MetadataItem" | "DEPRECATED_MetadataItemCollection" | "DEPRECATED_MetadataItemPolymorphic" | "DEPRECATED_MetadataDict" | "DEPRECATED_MainModelField" | "DEPRECATED_CogView4MainModelField" | "DEPRECATED_FluxMainModelField" | "DEPRECATED_SD3MainModelField" | "DEPRECATED_SDXLMainModelField" | "DEPRECATED_SDXLRefinerModelField" | "DEPRECATED_ONNXModelField" | "DEPRECATED_VAEModelField" | "DEPRECATED_FluxVAEModelField" | "DEPRECATED_LoRAModelField" | "DEPRECATED_ControlNetModelField" | "DEPRECATED_IPAdapterModelField" | "DEPRECATED_T2IAdapterModelField" | "DEPRECATED_T5EncoderModelField" | "DEPRECATED_CLIPEmbedModelField" | "DEPRECATED_CLIPLEmbedModelField" | "DEPRECATED_CLIPGEmbedModelField" | "DEPRECATED_SpandrelImageToImageModelField" | "DEPRECATED_ControlLoRAModelField" | "DEPRECATED_SigLipModelField" | "DEPRECATED_FluxReduxModelField" | "DEPRECATED_LLaVAModelField" | "DEPRECATED_Imagen3ModelField" | "DEPRECATED_Imagen4ModelField" | "DEPRECATED_ChatGPT4oModelField" | "DEPRECATED_Gemini2_5ModelField" | "DEPRECATED_FluxKontextModelField" | "DEPRECATED_Veo3ModelField" | "DEPRECATED_RunwayModelField"; - /** UNetField */ - UNetField: { - /** @description Info 
to load unet submodel */ - unet: components["schemas"]["ModelIdentifierField"]; - /** @description Info to load scheduler submodel */ - scheduler: components["schemas"]["ModelIdentifierField"]; + name: string; /** - * Loras - * @description LoRAs to apply on model loading + * Description + * @description Model description */ - loras: components["schemas"]["LoRAField"][]; + description: string | null; /** - * Seamless Axes - * @description Axes("x" and "y") to which apply seamless + * Source + * @description The original source of the model (path, URL or repo_id). */ - seamless_axes?: string[]; + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; /** - * @description FreeU configuration - * @default null + * Source Api Response + * @description The original API response from the source, as stringified JSON. */ - freeu_config?: components["schemas"]["FreeUConfig"] | null; - }; - /** - * UNetOutput - * @description Base class for invocations that output a UNet field. - */ - UNetOutput: { + source_api_response: string | null; /** - * UNet - * @description UNet (scheduler, LoRAs) + * Cover Image + * @description Url for image to preview model */ - unet: components["schemas"]["UNetField"]; + cover_image: string | null; /** - * type - * @default unet_output - * @constant + * Usage Info + * @description Usage information for this model */ - type: "unet_output"; - }; - /** - * URLModelSource - * @description A generic URL point to a checkpoint file. - */ - URLModelSource: { + usage_info: string | null; /** - * Url - * Format: uri + * Config Path + * @description Path to the config for this model, if any. */ - url: string; - /** Access Token */ - access_token?: string | null; + config_path: string | null; /** - * @description discriminator enum property added by openapi-typescript - * @enum {string} + * Type + * @default vae + * @constant */ - type: "url"; - }; - /** URLRegexTokenPair */ - URLRegexTokenPair: { + type: "vae"; /** - * Url Regex - * @description Regular expression to match against the URL + * Format + * @default checkpoint + * @constant */ - url_regex: string; + format: "checkpoint"; /** - * Token - * @description Token to use when the URL matches the regex + * Base + * @default sd-1 + * @constant */ - token: string; + base: "sd-1"; }; - /** UnknownModelConfig */ - UnknownModelConfig: { + /** VAE_Checkpoint_SD2_Config */ + VAE_Checkpoint_SD2_Config: { /** * Key * @description A unique key for this model. @@ -21968,23 +24959,10 @@ export type components = { */ name: string; /** - * Type - * @default unknown - * @constant - */ - type: "unknown"; - /** - * Format - * @default unknown - * @constant - */ - format: "unknown"; - /** - * Base - * @default unknown - * @constant + * Description + * @description Model description */ - base: "unknown"; + description: string | null; /** * Source * @description The original source of the model (path, URL or repo_id). @@ -21992,134 +24970,47 @@ export type components = { source: string; /** @description The type of source */ source_type: components["schemas"]["ModelSourceType"]; - /** - * Description - * @description Model description - */ - description?: string | null; /** * Source Api Response * @description The original API response from the source, as stringified JSON. 
*/ - source_api_response?: string | null; + source_api_response: string | null; /** * Cover Image * @description Url for image to preview model */ - cover_image?: string | null; - /** - * Submodels - * @description Loadable submodels in this model - */ - submodels?: { - [key: string]: components["schemas"]["SubmodelDefinition"]; - } | null; + cover_image: string | null; /** * Usage Info * @description Usage information for this model */ - usage_info?: string | null; - }; - /** - * Unsharp Mask - * @description Applies an unsharp mask filter to an image - */ - UnsharpMaskInvocation: { - /** - * @description The board to save the image to - * @default null - */ - board?: components["schemas"]["BoardField"] | null; - /** - * @description Optional metadata to be saved with the image - * @default null - */ - metadata?: components["schemas"]["MetadataField"] | null; - /** - * Id - * @description The id of this instance of an invocation. Must be unique among all instances of invocations. - */ - id: string; - /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false - */ - is_intermediate?: boolean; - /** - * Use Cache - * @description Whether or not to use the cache - * @default true - */ - use_cache?: boolean; - /** - * @description The image to use - * @default null - */ - image?: components["schemas"]["ImageField"] | null; - /** - * Radius - * @description Unsharp mask radius - * @default 2 - */ - radius?: number; + usage_info: string | null; /** - * Strength - * @description Unsharp mask strength - * @default 50 + * Config Path + * @description Path to the config for this model, if any. */ - strength?: number; + config_path: string | null; /** - * type - * @default unsharp_mask + * Type + * @default vae * @constant */ - type: "unsharp_mask"; - }; - /** UnstarredImagesResult */ - UnstarredImagesResult: { - /** - * Affected Boards - * @description The ids of boards affected by the delete operation - */ - affected_boards: string[]; - /** - * Unstarred Images - * @description The names of the images that were unstarred - */ - unstarred_images: string[]; - }; - /** UnstarredVideosResult */ - UnstarredVideosResult: { - /** - * Affected Boards - * @description The ids of boards affected by the delete operation - */ - affected_boards: string[]; - /** - * Unstarred Videos - * @description The ids of the videos that were unstarred - */ - unstarred_videos: string[]; - }; - /** Upscaler */ - Upscaler: { + type: "vae"; /** - * Upscaling Method - * @description Name of upscaling method + * Format + * @default checkpoint + * @constant */ - upscaling_method: string; + format: "checkpoint"; /** - * Upscaling Models - * @description List of upscaling models for this method + * Base + * @default sd-2 + * @constant */ - upscaling_models: string[]; + base: "sd-2"; }; - /** - * VAECheckpointConfig - * @description Model config for standalone VAE models. - */ - VAECheckpointConfig: { + /** VAE_Checkpoint_SDXL_Config */ + VAE_Checkpoint_SDXL_Config: { /** * Key * @description A unique key for this model. @@ -22146,20 +25037,10 @@ export type components = { */ name: string; /** - * Type - * @default vae - * @constant - */ - type: "vae"; - /** - * Format - * @description Format of the provided checkpoint model - * @default checkpoint - * @enum {string} + * Description + * @description Model description */ - format: "checkpoint" | "bnb_quantized_nf4b" | "gguf_quantized"; - /** @description The base model. 
*/ - base: components["schemas"]["BaseModelType"]; + description: string | null; /** * Source * @description The original source of the model (path, URL or repo_id). @@ -22167,49 +25048,47 @@ export type components = { source: string; /** @description The type of source */ source_type: components["schemas"]["ModelSourceType"]; - /** - * Description - * @description Model description - */ - description?: string | null; /** * Source Api Response * @description The original API response from the source, as stringified JSON. */ - source_api_response?: string | null; + source_api_response: string | null; /** * Cover Image * @description Url for image to preview model */ - cover_image?: string | null; - /** - * Submodels - * @description Loadable submodels in this model - */ - submodels?: { - [key: string]: components["schemas"]["SubmodelDefinition"]; - } | null; + cover_image: string | null; /** * Usage Info * @description Usage information for this model */ - usage_info?: string | null; + usage_info: string | null; /** * Config Path - * @description path to the checkpoint model config file + * @description Path to the config for this model, if any. + */ + config_path: string | null; + /** + * Type + * @default vae + * @constant */ - config_path: string; + type: "vae"; + /** + * Format + * @default checkpoint + * @constant + */ + format: "checkpoint"; /** - * Converted At - * @description When this model was last converted to diffusers + * Base + * @default sdxl + * @constant */ - converted_at?: number | null; + base: "sdxl"; }; - /** - * VAEDiffusersConfig - * @description Model config for standalone VAE models (diffusers version). - */ - VAEDiffusersConfig: { + /** VAE_Diffusers_SD1_Config */ + VAE_Diffusers_SD1_Config: { /** * Key * @description A unique key for this model. @@ -22236,19 +25115,10 @@ export type components = { */ name: string; /** - * Type - * @default vae - * @constant - */ - type: "vae"; - /** - * Format - * @default diffusers - * @constant + * Description + * @description Model description */ - format: "diffusers"; - /** @description The base model. */ - base: components["schemas"]["BaseModelType"]; + description: string | null; /** * Source * @description The original source of the model (path, URL or repo_id). @@ -22256,128 +25126,44 @@ export type components = { source: string; /** @description The type of source */ source_type: components["schemas"]["ModelSourceType"]; - /** - * Description - * @description Model description - */ - description?: string | null; /** * Source Api Response * @description The original API response from the source, as stringified JSON. */ - source_api_response?: string | null; - /** - * Cover Image - * @description Url for image to preview model - */ - cover_image?: string | null; - /** - * Submodels - * @description Loadable submodels in this model - */ - submodels?: { - [key: string]: components["schemas"]["SubmodelDefinition"]; - } | null; - /** - * Usage Info - * @description Usage information for this model - */ - usage_info?: string | null; - }; - /** VAEField */ - VAEField: { - /** @description Info to load vae submodel */ - vae: components["schemas"]["ModelIdentifierField"]; - /** - * Seamless Axes - * @description Axes("x" and "y") to which apply seamless - */ - seamless_axes?: string[]; - }; - /** - * VAE Model - SD1.5, SD2, SDXL, SD3, FLUX - * @description Loads a VAE model, outputting a VaeLoaderOutput - */ - VAELoaderInvocation: { - /** - * Id - * @description The id of this instance of an invocation. 
Must be unique among all instances of invocations. - */ - id: string; - /** - * Is Intermediate - * @description Whether or not this is an intermediate invocation. - * @default false - */ - is_intermediate?: boolean; - /** - * Use Cache - * @description Whether or not to use the cache - * @default true - */ - use_cache?: boolean; - /** - * VAE - * @description VAE model to load - * @default null - */ - vae_model?: components["schemas"]["ModelIdentifierField"] | null; - /** - * type - * @default vae_loader - * @constant - */ - type: "vae_loader"; - }; - /** - * VAEOutput - * @description Base class for invocations that output a VAE field - */ - VAEOutput: { + source_api_response: string | null; /** - * VAE - * @description VAE + * Cover Image + * @description Url for image to preview model */ - vae: components["schemas"]["VAEField"]; + cover_image: string | null; /** - * type - * @default vae_output - * @constant + * Usage Info + * @description Usage information for this model */ - type: "vae_output"; - }; - /** ValidationError */ - ValidationError: { - /** Location */ - loc: (string | number)[]; - /** Message */ - msg: string; - /** Error Type */ - type: string; - }; - /** ValidationRunData */ - ValidationRunData: { + usage_info: string | null; /** - * Workflow Id - * @description The id of the workflow being published. + * Format + * @default diffusers + * @constant */ - workflow_id: string; + format: "diffusers"; + /** @default */ + repo_variant: components["schemas"]["ModelRepoVariant"]; /** - * Input Fields - * @description The input fields for the published workflow + * Type + * @default vae + * @constant */ - input_fields: components["schemas"]["FieldIdentifier"][]; + type: "vae"; /** - * Output Fields - * @description The output fields for the published workflow + * Base + * @default sd-1 + * @constant */ - output_fields: components["schemas"]["FieldIdentifier"][]; + base: "sd-1"; }; - /** - * VideoApiModelConfig - * @description Model config for API-based video models. - */ - VideoApiModelConfig: { + /** VAE_Diffusers_SDXL_Config */ + VAE_Diffusers_SDXL_Config: { /** * Key * @description A unique key for this model. @@ -22404,19 +25190,10 @@ export type components = { */ name: string; /** - * Type - * @default video - * @constant - */ - type: "video"; - /** - * Format - * @default api - * @constant + * Description + * @description Model description */ - format: "api"; - /** @description The base model. */ - base: components["schemas"]["BaseModelType"]; + description: string | null; /** * Source * @description The original source of the model (path, URL or repo_id). @@ -22424,45 +25201,68 @@ export type components = { source: string; /** @description The type of source */ source_type: components["schemas"]["ModelSourceType"]; - /** - * Description - * @description Model description - */ - description?: string | null; /** * Source Api Response * @description The original API response from the source, as stringified JSON. 
*/ - source_api_response?: string | null; + source_api_response: string | null; /** * Cover Image * @description Url for image to preview model */ - cover_image?: string | null; - /** - * Submodels - * @description Loadable submodels in this model - */ - submodels?: { - [key: string]: components["schemas"]["SubmodelDefinition"]; - } | null; + cover_image: string | null; /** * Usage Info * @description Usage information for this model */ - usage_info?: string | null; + usage_info: string | null; /** - * Trigger Phrases - * @description Set of trigger phrases for this model + * Format + * @default diffusers + * @constant */ - trigger_phrases?: string[] | null; - /** @description Default settings for this model */ - default_settings?: components["schemas"]["MainModelDefaultSettings"] | null; + format: "diffusers"; + /** @default */ + repo_variant: components["schemas"]["ModelRepoVariant"]; /** - * Variant - * @default normal + * Type + * @default vae + * @constant + */ + type: "vae"; + /** + * Base + * @default sdxl + * @constant + */ + base: "sdxl"; + }; + /** ValidationError */ + ValidationError: { + /** Location */ + loc: (string | number)[]; + /** Message */ + msg: string; + /** Error Type */ + type: string; + }; + /** ValidationRunData */ + ValidationRunData: { + /** + * Workflow Id + * @description The id of the workflow being published. + */ + workflow_id: string; + /** + * Input Fields + * @description The input fields for the published workflow + */ + input_fields: components["schemas"]["FieldIdentifier"][]; + /** + * Output Fields + * @description The output fields for the published workflow */ - variant?: components["schemas"]["ModelVariantType"] | components["schemas"]["ClipVariantType"] | null; + output_fields: components["schemas"]["FieldIdentifier"][]; }; /** * VideoDTO @@ -22622,6 +25422,166 @@ export type components = { */ starred?: boolean | null; }; + /** Video_ExternalAPI_Runway_Config */ + Video_ExternalAPI_Runway_Config: { + /** + * Key + * @description A unique key for this model. + */ + key: string; + /** + * Hash + * @description The hash of the model file(s). + */ + hash: string; + /** + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. + */ + path: string; + /** + * File Size + * @description The size of the model in bytes. + */ + file_size: number; + /** + * Name + * @description Name of the model. + */ + name: string; + /** + * Description + * @description Model description + */ + description: string | null; + /** + * Source + * @description The original source of the model (path, URL or repo_id). + */ + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; + /** + * Source Api Response + * @description The original API response from the source, as stringified JSON. 
+ */ + source_api_response: string | null; + /** + * Cover Image + * @description Url for image to preview model + */ + cover_image: string | null; + /** + * Usage Info + * @description Usage information for this model + */ + usage_info: string | null; + /** + * Type + * @default video + * @constant + */ + type: "video"; + /** + * Trigger Phrases + * @description Set of trigger phrases for this model + */ + trigger_phrases: string[] | null; + /** @description Default settings for this model */ + default_settings: components["schemas"]["MainModelDefaultSettings"] | null; + /** + * Format + * @default api + * @constant + */ + format: "api"; + /** + * Base + * @default runway + * @constant + */ + base: "runway"; + }; + /** Video_ExternalAPI_Veo3_Config */ + Video_ExternalAPI_Veo3_Config: { + /** + * Key + * @description A unique key for this model. + */ + key: string; + /** + * Hash + * @description The hash of the model file(s). + */ + hash: string; + /** + * Path + * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory. + */ + path: string; + /** + * File Size + * @description The size of the model in bytes. + */ + file_size: number; + /** + * Name + * @description Name of the model. + */ + name: string; + /** + * Description + * @description Model description + */ + description: string | null; + /** + * Source + * @description The original source of the model (path, URL or repo_id). + */ + source: string; + /** @description The type of source */ + source_type: components["schemas"]["ModelSourceType"]; + /** + * Source Api Response + * @description The original API response from the source, as stringified JSON. + */ + source_api_response: string | null; + /** + * Cover Image + * @description Url for image to preview model + */ + cover_image: string | null; + /** + * Usage Info + * @description Usage information for this model + */ + usage_info: string | null; + /** + * Type + * @default video + * @constant + */ + type: "video"; + /** + * Trigger Phrases + * @description Set of trigger phrases for this model + */ + trigger_phrases: string[] | null; + /** @description Default settings for this model */ + default_settings: components["schemas"]["MainModelDefaultSettings"] | null; + /** + * Format + * @default api + * @constant + */ + format: "api"; + /** + * Base + * @default veo3 + * @constant + */ + base: "veo3"; + }; /** Workflow */ Workflow: { /** @@ -23030,7 +25990,7 @@ export interface operations { [name: string]: unknown; }; content: { - "application/json": components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | 
components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"] | components["schemas"]["UnknownModelConfig"]; + "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | 
components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Main_ExternalAPI_ChatGPT4o_Config"] | components["schemas"]["Main_ExternalAPI_Gemini2_5_Config"] | components["schemas"]["Main_ExternalAPI_Imagen3_Config"] | components["schemas"]["Main_ExternalAPI_Imagen4_Config"] | components["schemas"]["Main_ExternalAPI_FluxKontext_Config"] | components["schemas"]["Video_ExternalAPI_Veo3_Config"] | components["schemas"]["Video_ExternalAPI_Runway_Config"] | components["schemas"]["Unknown_Config"]; }; }; /** @description Validation Error */ @@ -23080,7 +26040,7 @@ export interface operations { * "repo_variant": "fp16", * "upcast_attention": false * } */ - "application/json": components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"] | components["schemas"]["UnknownModelConfig"]; + "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | 
components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Main_ExternalAPI_ChatGPT4o_Config"] | components["schemas"]["Main_ExternalAPI_Gemini2_5_Config"] | components["schemas"]["Main_ExternalAPI_Imagen3_Config"] | components["schemas"]["Main_ExternalAPI_Imagen4_Config"] | components["schemas"]["Main_ExternalAPI_FluxKontext_Config"] | components["schemas"]["Video_ExternalAPI_Veo3_Config"] | components["schemas"]["Video_ExternalAPI_Runway_Config"] | components["schemas"]["Unknown_Config"]; }; }; /** @description Bad request */ @@ -23185,7 +26145,7 @@ export interface operations { * "repo_variant": "fp16", * "upcast_attention": false * } */ - "application/json": components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | 
components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"] | components["schemas"]["UnknownModelConfig"]; + "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | 
components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Main_ExternalAPI_ChatGPT4o_Config"] | components["schemas"]["Main_ExternalAPI_Gemini2_5_Config"] | components["schemas"]["Main_ExternalAPI_Imagen3_Config"] | components["schemas"]["Main_ExternalAPI_Imagen4_Config"] | components["schemas"]["Main_ExternalAPI_FluxKontext_Config"] | components["schemas"]["Video_ExternalAPI_Veo3_Config"] | components["schemas"]["Video_ExternalAPI_Runway_Config"] | components["schemas"]["Unknown_Config"]; }; }; /** @description Bad request */ @@ -23699,7 +26659,7 @@ export interface operations { * "repo_variant": "fp16", * "upcast_attention": false * } */ - "application/json": components["schemas"]["MainDiffusersConfig"] | components["schemas"]["MainCheckpointConfig"] | components["schemas"]["MainBnbQuantized4bCheckpointConfig"] | components["schemas"]["MainGGUFCheckpointConfig"] | components["schemas"]["VAEDiffusersConfig"] | components["schemas"]["VAECheckpointConfig"] | components["schemas"]["ControlNetDiffusersConfig"] | components["schemas"]["ControlNetCheckpointConfig"] | components["schemas"]["LoRALyCORISConfig"] | components["schemas"]["LoRAOmiConfig"] | components["schemas"]["ControlLoRALyCORISConfig"] | components["schemas"]["ControlLoRADiffusersConfig"] | components["schemas"]["LoRADiffusersConfig"] | components["schemas"]["T5EncoderConfig"] | components["schemas"]["T5EncoderBnbQuantizedLlmInt8bConfig"] | components["schemas"]["TextualInversionFileConfig"] | components["schemas"]["TextualInversionFolderConfig"] | components["schemas"]["IPAdapterInvokeAIConfig"] | components["schemas"]["IPAdapterCheckpointConfig"] | components["schemas"]["T2IAdapterConfig"] | components["schemas"]["SpandrelImageToImageConfig"] | components["schemas"]["CLIPVisionDiffusersConfig"] | components["schemas"]["CLIPLEmbedDiffusersConfig"] | components["schemas"]["CLIPGEmbedDiffusersConfig"] | components["schemas"]["SigLIPConfig"] | components["schemas"]["FluxReduxConfig"] | components["schemas"]["LlavaOnevisionConfig"] | components["schemas"]["ApiModelConfig"] | components["schemas"]["VideoApiModelConfig"] | components["schemas"]["UnknownModelConfig"]; + "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | 
components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["Main_ExternalAPI_ChatGPT4o_Config"] | components["schemas"]["Main_ExternalAPI_Gemini2_5_Config"] | components["schemas"]["Main_ExternalAPI_Imagen3_Config"] | components["schemas"]["Main_ExternalAPI_Imagen4_Config"] | components["schemas"]["Main_ExternalAPI_FluxKontext_Config"] | components["schemas"]["Video_ExternalAPI_Veo3_Config"] | components["schemas"]["Video_ExternalAPI_Runway_Config"] | components["schemas"]["Unknown_Config"]; }; }; /** @description Bad request */ diff --git a/invokeai/frontend/web/src/services/api/types.ts b/invokeai/frontend/web/src/services/api/types.ts index b9798c04d99..8b0cafe46b3 100644 --- 
a/invokeai/frontend/web/src/services/api/types.ts +++ b/invokeai/frontend/web/src/services/api/types.ts @@ -106,51 +106,36 @@ export const isVideoDTO = (dto: ImageDTO | VideoDTO): dto is VideoDTO => { }; // Model Configs -export type ControlLoRAModelConfig = S['ControlLoRALyCORISConfig'] | S['ControlLoRADiffusersConfig']; -export type LoRAModelConfig = S['LoRADiffusersConfig'] | S['LoRALyCORISConfig'] | S['LoRAOmiConfig']; -export type VAEModelConfig = S['VAECheckpointConfig'] | S['VAEDiffusersConfig']; -export type ControlNetModelConfig = S['ControlNetDiffusersConfig'] | S['ControlNetCheckpointConfig']; -export type IPAdapterModelConfig = S['IPAdapterInvokeAIConfig'] | S['IPAdapterCheckpointConfig']; -export type T2IAdapterModelConfig = S['T2IAdapterConfig']; -export type CLIPLEmbedModelConfig = S['CLIPLEmbedDiffusersConfig']; -export type CLIPGEmbedModelConfig = S['CLIPGEmbedDiffusersConfig']; -export type CLIPEmbedModelConfig = CLIPLEmbedModelConfig | CLIPGEmbedModelConfig; -type LlavaOnevisionConfig = S['LlavaOnevisionConfig']; -export type T5EncoderModelConfig = S['T5EncoderConfig']; -export type T5EncoderBnbQuantizedLlmInt8bModelConfig = S['T5EncoderBnbQuantizedLlmInt8bConfig']; -export type SpandrelImageToImageModelConfig = S['SpandrelImageToImageConfig']; -type TextualInversionModelConfig = S['TextualInversionFileConfig'] | S['TextualInversionFolderConfig']; -type DiffusersModelConfig = S['MainDiffusersConfig']; -export type CheckpointModelConfig = S['MainCheckpointConfig']; -type CLIPVisionDiffusersConfig = S['CLIPVisionDiffusersConfig']; -type SigLipModelConfig = S['SigLIPConfig']; -export type FLUXReduxModelConfig = S['FluxReduxConfig']; -type ApiModelConfig = S['ApiModelConfig']; -export type VideoApiModelConfig = S['VideoApiModelConfig']; -type UnknownModelConfig = S['UnknownModelConfig']; -export type MainModelConfig = DiffusersModelConfig | CheckpointModelConfig | ApiModelConfig; +export type AnyModelConfig = S['AnyModelConfig']; +export type MainModelConfig = Extract<S['AnyModelConfig'], { type: 'main' }>; +export type FLUXModelConfig = Extract<S['AnyModelConfig'], { type: 'main'; base: 'flux' }>; +export type ControlLoRAModelConfig = Extract<S['AnyModelConfig'], { type: 'control_lora' }>; +export type LoRAModelConfig = Extract<S['AnyModelConfig'], { type: 'lora' }>; +export type VAEModelConfig = Extract<S['AnyModelConfig'], { type: 'vae' }>; +export type ControlNetModelConfig = Extract<S['AnyModelConfig'], { type: 'controlnet' }>; +export type IPAdapterModelConfig = Extract<S['AnyModelConfig'], { type: 'ip_adapter' }>; +export type T2IAdapterModelConfig = Extract<S['AnyModelConfig'], { type: 't2i_adapter' }>; +export type CLIPLEmbedModelConfig = Extract<S['AnyModelConfig'], { type: 'clip_embed'; variant: 'large' }>; +export type CLIPGEmbedModelConfig = Extract<S['AnyModelConfig'], { type: 'clip_embed'; variant: 'gigantic' }>; +export type CLIPEmbedModelConfig = Extract<S['AnyModelConfig'], { type: 'clip_embed' }>; +type LlavaOnevisionConfig = Extract<S['AnyModelConfig'], { type: 'llava_onevision' }>; +export type T5EncoderModelConfig = Extract<S['AnyModelConfig'], { type: 't5_encoder'; format: 't5_encoder' }>; +export type T5EncoderBnbQuantizedLlmInt8bModelConfig = Extract< + S['AnyModelConfig'], + { type: 't5_encoder'; format: 'bnb_quantized_int8b' } +>; +export type SpandrelImageToImageModelConfig = Extract<S['AnyModelConfig'], { type: 'spandrel_image_to_image' }>; +export type CheckpointModelConfig = Extract<S['AnyModelConfig'], { type: 'main'; format: 'checkpoint' }>; +type CLIPVisionDiffusersConfig = Extract<S['AnyModelConfig'], { type: 'clip_vision' }>; +type SigLipModelConfig = Extract<S['AnyModelConfig'], { type: 'siglip' }>; +export type FLUXReduxModelConfig = Extract<S['AnyModelConfig'], { type: 'flux_redux' }>; +type ApiModelConfig = Extract<S['AnyModelConfig'], { type: 'main'; format: 'api' }>; +export type VideoApiModelConfig = Extract<S['AnyModelConfig'], { type: 'video' }>; +type UnknownModelConfig = Extract<S['AnyModelConfig'], { type: 'unknown' }>; export type FLUXKontextModelConfig = MainModelConfig; export type ChatGPT4oModelConfig = ApiModelConfig; export type Gemini2_5ModelConfig = ApiModelConfig; -export type AnyModelConfig = - | ControlLoRAModelConfig - | LoRAModelConfig - | VAEModelConfig - | ControlNetModelConfig - | IPAdapterModelConfig - | T5EncoderModelConfig - | T5EncoderBnbQuantizedLlmInt8bModelConfig - | CLIPEmbedModelConfig - | T2IAdapterModelConfig - | SpandrelImageToImageModelConfig - | TextualInversionModelConfig - | MainModelConfig - | 
VideoApiModelConfig - | CLIPVisionDiffusersConfig - | SigLipModelConfig - | FLUXReduxModelConfig - | LlavaOnevisionConfig - | UnknownModelConfig; +type SubmodelDefinition = S['SubmodelDefinition']; /** * Checks if a list of submodels contains any that match a given variant or type @@ -158,7 +143,7 @@ export type AnyModelConfig = * @param checkStr The string to check against for variant or type * @returns A boolean */ -const checkSubmodel = (submodels: AnyModelConfig['submodels'], checkStr: string): boolean => { +const checkSubmodel = (submodels: Record<string, SubmodelDefinition>, checkStr: string): boolean => { for (const submodel in submodels) { if ( submodel && @@ -181,6 +166,7 @@ const checkSubmodels = (identifiers: string[], config: AnyModelConfig): boolean return identifiers.every( (identifier) => config.type === 'main' && + 'submodels' in config && config.submodels && (identifier in config.submodels || checkSubmodel(config.submodels, identifier)) ); @@ -349,7 +335,7 @@ export const isRefinerMainModelModelConfig = (config: AnyModelConfig): config is }; export const isFluxFillMainModelModelConfig = (config: AnyModelConfig): config is MainModelConfig => { - return config.type === 'main' && config.base === 'flux' && config.variant === 'inpaint'; + return config.type === 'main' && config.base === 'flux' && config.variant === 'dev_fill'; }; export const isTIModelConfig = (config: AnyModelConfig): config is MainModelConfig => { diff --git a/invokeai/frontend/web/src/services/events/setEventListeners.tsx b/invokeai/frontend/web/src/services/events/setEventListeners.tsx index 0cfa4cef26f..5804399f1f7 100644 --- a/invokeai/frontend/web/src/services/events/setEventListeners.tsx +++ b/invokeai/frontend/web/src/services/events/setEventListeners.tsx @@ -295,18 +295,7 @@ export const setEventListeners = ({ socket, store, setIsConnected }: SetEventLis const { id, config } = data; - if ( - config.type === 'unknown' || - config.base === 'unknown' || - /** - * Checking if type/base are 'unknown' technically narrows the config such that it's not possible for a config - * that passes to the `config.[type|base] === 'unknown'` checks. In the future, if we have more model config - * classes, this may change, so we will continue to check all three. Any one being 'unknown' is concerning - * enough to warrant a toast. 
- */ - /* @ts-expect-error See note above */ - config.format === 'unknown' - ) { + if (config.type === 'unknown') { toast({ id: 'UNKNOWN_MODEL', title: t('modelManager.unidentifiedModelTitle'), diff --git a/scripts/classify-model.py b/scripts/classify-model.py index 6411b4c7055..a9129860a7f 100755 --- a/scripts/classify-model.py +++ b/scripts/classify-model.py @@ -7,7 +7,8 @@ from typing import get_args from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS -from invokeai.backend.model_manager import InvalidModelConfigException, ModelConfigBase, ModelProbe +from invokeai.backend.model_manager import InvalidModelConfigException, ModelProbe +from invokeai.backend.model_manager.configs.factory import ModelConfigFactory algos = ", ".join(set(get_args(HASHING_ALGORITHMS))) @@ -30,7 +31,10 @@ def classify_with_fallback(path: Path, hash_algo: HASHING_ALGORITHMS): try: return ModelProbe.probe(path, hash_algo=hash_algo) except InvalidModelConfigException: - return ModelConfigBase.classify(path, hash_algo) + return ModelConfigFactory.from_model_on_disk( + mod=path, + hash_algo=hash_algo, + ) for path in args.model_path: diff --git a/tests/app/services/model_records/test_model_records_sql.py b/tests/app/services/model_records/test_model_records_sql.py index c8b5698dd8b..41bbc2c024d 100644 --- a/tests/app/services/model_records/test_model_records_sql.py +++ b/tests/app/services/model_records/test_model_records_sql.py @@ -21,7 +21,7 @@ ControlAdapterDefaultSettings, MainDiffusersConfig, MainModelDefaultSettings, - TextualInversionFileConfig, + TI_File_Config, VAEDiffusersConfig, ) from invokeai.backend.model_manager.taxonomy import ModelSourceType @@ -40,8 +40,8 @@ def store( return ModelRecordServiceSQL(db, logger) -def example_ti_config(key: Optional[str] = None) -> TextualInversionFileConfig: - config = TextualInversionFileConfig( +def example_ti_config(key: Optional[str] = None) -> TI_File_Config: + config = TI_File_Config( source="test/source/", source_type=ModelSourceType.Path, path="/tmp/pokemon.bin", @@ -61,7 +61,7 @@ def test_type(store: ModelRecordServiceBase): config = example_ti_config("key1") store.add_model(config) config1 = store.get_model("key1") - assert isinstance(config1, TextualInversionFileConfig) + assert isinstance(config1, TI_File_Config) def test_raises_on_violating_uniqueness(store: ModelRecordServiceBase): diff --git a/tests/backend/patches/lora_conversions/test_flux_aitoolkit_lora_conversion_utils.py b/tests/backend/patches/lora_conversions/test_flux_aitoolkit_lora_conversion_utils.py index ed3e05a9b26..051ed210cd5 100644 --- a/tests/backend/patches/lora_conversions/test_flux_aitoolkit_lora_conversion_utils.py +++ b/tests/backend/patches/lora_conversions/test_flux_aitoolkit_lora_conversion_utils.py @@ -2,7 +2,8 @@ import pytest from invokeai.backend.flux.model import Flux -from invokeai.backend.flux.util import params +from invokeai.backend.flux.util import get_flux_transformers_params +from invokeai.backend.model_manager.taxonomy import ModelVariantType from invokeai.backend.patches.lora_conversions.flux_aitoolkit_lora_conversion_utils import ( _group_state_by_submodel, is_state_dict_likely_in_flux_aitoolkit_format, @@ -44,7 +45,7 @@ def test_flux_aitoolkit_transformer_state_dict_is_in_invoke_format(): # Initialize a FLUX model on the meta device. 
with accelerate.init_empty_weights(): - model = Flux(params["flux-schnell"]) + model = Flux(get_flux_transformers_params(ModelVariantType.FluxSchnell)) model_keys = set(model.state_dict().keys()) for converted_key_prefix in converted_key_prefixes: diff --git a/tests/backend/patches/lora_conversions/test_flux_kohya_lora_conversion_utils.py b/tests/backend/patches/lora_conversions/test_flux_kohya_lora_conversion_utils.py index 52b8ecc9c9c..eb8846f456b 100644 --- a/tests/backend/patches/lora_conversions/test_flux_kohya_lora_conversion_utils.py +++ b/tests/backend/patches/lora_conversions/test_flux_kohya_lora_conversion_utils.py @@ -3,7 +3,8 @@ import torch from invokeai.backend.flux.model import Flux -from invokeai.backend.flux.util import params +from invokeai.backend.flux.util import get_flux_transformers_params +from invokeai.backend.model_manager.taxonomy import ModelVariantType from invokeai.backend.patches.lora_conversions.flux_kohya_lora_conversion_utils import ( _convert_flux_transformer_kohya_state_dict_to_invoke_format, is_state_dict_likely_in_flux_kohya_format, ) @@ -63,7 +64,7 @@ def test_convert_flux_transformer_kohya_state_dict_to_invoke_format(): # Initialize a FLUX model on the meta device. with accelerate.init_empty_weights(): - model = Flux(params["flux-dev"]) + model = Flux(get_flux_transformers_params(ModelVariantType.FluxDev)) model_keys = set(model.state_dict().keys()) # Assert that the converted state dict matches the keys in the actual model. diff --git a/tests/test_model_probe.py b/tests/test_model_probe.py index 8ee4f8df1f5..f1249e4dc1b 100644 --- a/tests/test_model_probe.py +++ b/tests/test_model_probe.py @@ -13,9 +13,9 @@ from invokeai.backend.model_manager import BaseModelType, ModelFormat, ModelRepoVariant, ModelType, ModelVariantType from invokeai.backend.model_manager.config import ( AnyModelConfig, + Config_Base, InvalidModelConfigException, MainDiffusersConfig, - ModelConfigBase, ModelConfigFactory, get_model_discriminator_value, ) @@ -109,13 +109,13 @@ def test_probe_sd1_diffusers_inpainting(datadir: Path): assert config.repo_variant is ModelRepoVariant.FP16 -class MinimalConfigExample(ModelConfigBase): +class MinimalConfigExample(Config_Base): type: ModelType = ModelType.Main format: ModelFormat = ModelFormat.Checkpoint fun_quote: str @classmethod - def matches(cls, mod: ModelOnDisk) -> bool: + def matches(cls, mod: ModelOnDisk, **overrides) -> bool: return mod.path.suffix == ".json" @classmethod @@ -132,7 +132,10 @@ def parse(cls, mod: ModelOnDisk) -> dict[str, Any]: def test_minimal_working_example(datadir: Path): model_path = datadir / "minimal_config_model.json" overrides = {"base": BaseModelType.StableDiffusion1} - config = ModelConfigBase.classify(model_path, **overrides) + config = ModelConfigFactory.from_model_on_disk( + mod=model_path, + overrides=overrides, + ) assert isinstance(config, MinimalConfigExample) assert config.base == BaseModelType.StableDiffusion1 @@ -160,7 +163,10 @@ def test_regression_against_model_probe(datadir: Path, override_model_loading): try: stripped_mod = StrippedModelOnDisk(path) - new_config = ModelConfigBase.classify(stripped_mod, hash=fake_hash, key=fake_key) + new_config = ModelConfigFactory.from_model_on_disk( + mod=stripped_mod, + overrides={"hash": fake_hash, "key": fake_key}, + ) except InvalidModelConfigException: pass @@ -169,10 +175,10 @@ def test_regression_against_model_probe(datadir: Path, override_model_loading): assert legacy_config.model_dump_json() == new_config.model_dump_json() elif legacy_config: - 
assert type(legacy_config) in ModelConfigBase.USING_LEGACY_PROBE + assert type(legacy_config) in Config_Base.USING_LEGACY_PROBE elif new_config: - assert type(new_config) in ModelConfigBase.USING_CLASSIFY_API + assert type(new_config) in Config_Base.USING_CLASSIFY_API else: raise ValueError(f"Both probe and classify failed to classify model at path {path}.") @@ -180,7 +186,7 @@ def test_regression_against_model_probe(datadir: Path, override_model_loading): config_type = type(legacy_config or new_config) configs_with_tests.add(config_type) - untested_configs = ModelConfigBase.all_config_classes() - configs_with_tests - {MinimalConfigExample} + untested_configs = Config_Base.all_config_classes() - configs_with_tests - {MinimalConfigExample} logger.warning(f"Function test_regression_against_model_probe missing test case for: {untested_configs}") @@ -200,7 +206,7 @@ def test_serialisation_roundtrip(): We need to ensure they are de-serialised into the original config with all relevant fields restored. """ excluded = {MinimalConfigExample} - for config_cls in ModelConfigBase.all_config_classes() - excluded: + for config_cls in Config_Base.all_config_classes() - excluded: trials_per_class = 50 configs_with_random_data = create_fake_configs(config_cls, trials_per_class) @@ -215,7 +221,7 @@ def test_serialisation_roundtrip(): def test_discriminator_tagging_for_config_instances(): """Verify that each ModelConfig instance is assigned the correct, unique Pydantic discriminator tag.""" excluded = {MinimalConfigExample} - config_classes = ModelConfigBase.all_config_classes() - excluded + config_classes = Config_Base.all_config_classes() - excluded tags = {c.get_tag() for c in config_classes} assert len(tags) == len(config_classes), "Each config should have its own unique tag" @@ -240,10 +246,10 @@ def test_inheritance_order(): It may be worth rethinking our config taxonomy in the future, but in the meantime this test can help prevent debugging effort. """ - for config_cls in ModelConfigBase.all_config_classes(): + for config_cls in Config_Base.all_config_classes(): excluded = {abc.ABC, pydantic.BaseModel, object} inheritance_list = [cls for cls in config_cls.mro() if cls not in excluded] - assert inheritance_list[-1] is ModelConfigBase + assert inheritance_list[-1] is Config_Base def test_any_model_config_includes_all_config_classes(): @@ -256,7 +262,7 @@ def test_any_model_config_includes_all_config_classes(): config_class, _ = get_args(annotated_pair) extracted.add(config_class) - expected = set(ModelConfigBase.all_config_classes()) - {MinimalConfigExample} + expected = set(Config_Base.all_config_classes()) - {MinimalConfigExample} assert extracted == expected @@ -264,7 +270,7 @@ def test_config_uniquely_matches_model(datadir: Path): model_paths = ModelSearch().search(datadir / "stripped_models") for path in model_paths: mod = StrippedModelOnDisk(path) - matches = {cls for cls in ModelConfigBase.USING_CLASSIFY_API if cls.matches(mod)} + matches = {cls for cls in Config_Base.USING_CLASSIFY_API if cls.matches(mod)} assert len(matches) <= 1, f"Model at path {path} matches multiple config classes: {matches}" if not matches: logger.warning(f"Model at path {path} does not match any config classes using classify API.")
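
A note on the migration pattern that the `scripts/classify-model.py` and `tests/test_model_probe.py` hunks above share: callers stop using `ModelConfigBase.classify(path, **overrides)` and instead call `ModelConfigFactory.from_model_on_disk(...)`, with the legacy `ModelProbe` kept as the first attempt. Below is a minimal sketch of that probe-then-classify fallback, assuming only the call signatures visible in this diff; the default `hash_algo` value, the example override, and the assumption that `hash_algo` and `overrides` can be passed together (they appear separately in the script and test hunks) are illustrative, not taken from the PR.

```python
from pathlib import Path

from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS
from invokeai.backend.model_manager import InvalidModelConfigException, ModelProbe
from invokeai.backend.model_manager.configs.factory import ModelConfigFactory


def classify_with_fallback(path: Path, hash_algo: HASHING_ALGORITHMS = "blake3_single"):
    """Probe first, then classify; mirrors scripts/classify-model.py above."""
    try:
        # Legacy path: ModelProbe raises InvalidModelConfigException for
        # models it cannot identify.
        return ModelProbe.probe(path, hash_algo=hash_algo)
    except InvalidModelConfigException:
        # New path: the factory runs the Config_Base subclasses' matches()
        # checks against the model on disk. Field overrides (a precomputed
        # hash or key, as in test_model_probe.py) are passed as a dict
        # rather than as keyword arguments.
        return ModelConfigFactory.from_model_on_disk(
            mod=path,
            hash_algo=hash_algo,  # combining both kwargs is an assumption
            overrides={"key": path.stem},  # illustrative override
        )
```

The visible behavioural difference is that overrides move from `**kwargs` on `classify` into an explicit `overrides` dict, which presumably is what allows the new `matches(cls, mod, **overrides)` signature to receive them during classification.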