diff --git a/docs/installation/quick_start.md b/docs/installation/quick_start.md
index 0da0689abfc..572289e9c1e 100644
--- a/docs/installation/quick_start.md
+++ b/docs/installation/quick_start.md
@@ -31,6 +31,12 @@ Hardware requirements vary significantly depending on model and image output siz
     - Memory: At least 32GB RAM.
     - Disk: 10GB for base installation plus 200GB for models.
 
+=== "Z-Image Turbo - 1024x1024"
+    - GPU: Nvidia 20xx series or later, 8GB+ VRAM for the Q4_K quantized model. 16GB+ needed for the Q8 or BF16 models.
+    - Memory: At least 16GB RAM.
+    - Disk: 10GB for base installation plus 35GB for models.
+
+
 More detail on system requirements can be found [here](./requirements.md).
 
 ## Step 2: Download and Set Up the Launcher
diff --git a/docs/installation/requirements.md b/docs/installation/requirements.md
index 4b52147e8cd..132ac31ed31 100644
--- a/docs/installation/requirements.md
+++ b/docs/installation/requirements.md
@@ -31,6 +31,11 @@ The requirements below are rough guidelines for best performance. GPUs with less
     - Memory: At least 32GB RAM.
     - Disk: 10GB for base installation plus 200GB for models.
 
+=== "Z-Image Turbo - 1024x1024"
+    - GPU: Nvidia 20xx series or later, 8GB+ VRAM for the Q4_K quantized model. 16GB+ needed for the Q8 or BF16 models.
+    - Memory: At least 16GB RAM.
+    - Disk: 10GB for base installation plus 35GB for models.
+
 !!! info "`tmpfs` on Linux"
 
     If your temporary directory is mounted as a `tmpfs`, ensure it has sufficient space.
diff --git a/invokeai/backend/model_manager/load/load_default.py b/invokeai/backend/model_manager/load/load_default.py
index 3fb7a574f31..84f42e12243 100644
--- a/invokeai/backend/model_manager/load/load_default.py
+++ b/invokeai/backend/model_manager/load/load_default.py
@@ -75,6 +75,7 @@ def _load_and_cache(self, config: AnyModelConfig, submodel_type: Optional[SubMod
         config.path = str(self._get_model_path(config))
 
         self._ram_cache.make_room(self.get_size_fs(config, Path(config.path), submodel_type))
+        self._logger.info(f"Loading model '{stats_name}' into RAM cache (config={config})")
         loaded_model = self._load_model(config, submodel_type)
 
         self._ram_cache.put(
diff --git a/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py b/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py
index 0e11cd4191d..4c05911019d 100644
--- a/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py
+++ b/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py
@@ -140,6 +140,7 @@ def _load_from_singlefile(
 
         # Some weights of the model checkpoint were not used when initializing CLIPTextModelWithProjection:
         # ['text_model.embeddings.position_ids']
+        self._logger.info(f"Loading model from single file at {config.path} using {load_class.__name__}")
         with SilenceWarnings():
             pipeline = load_class.from_single_file(config.path, torch_dtype=self._torch_dtype)
 
diff --git a/invokeai/backend/model_manager/starter_models.py b/invokeai/backend/model_manager/starter_models.py
index 1b972b2fd16..89d9666e83c 100644
--- a/invokeai/backend/model_manager/starter_models.py
+++ b/invokeai/backend/model_manager/starter_models.py
@@ -720,20 +720,20 @@ class StarterModelBundle(BaseModel):
     name="Z-Image Turbo (quantized)",
     base=BaseModelType.ZImage,
     source="https://huggingface.co/leejet/Z-Image-Turbo-GGUF/resolve/main/z_image_turbo-Q4_K.gguf",
-    description="Z-Image Turbo quantized to GGUF Q4_K format. Requires separate Qwen3 text encoder. ~4GB",
+    description="Z-Image Turbo quantized to GGUF Q4_K format. Requires standalone Qwen3 text encoder and Flux VAE. ~4GB",
     type=ModelType.Main,
     format=ModelFormat.GGUFQuantized,
-    dependencies=[z_image_qwen3_encoder_quantized],
+    dependencies=[z_image_qwen3_encoder_quantized, flux_vae],
 )
 
 z_image_turbo_q8 = StarterModel(
     name="Z-Image Turbo (Q8)",
     base=BaseModelType.ZImage,
     source="https://huggingface.co/leejet/Z-Image-Turbo-GGUF/resolve/main/z_image_turbo-Q8_0.gguf",
-    description="Z-Image Turbo quantized to GGUF Q8_0 format. Higher quality, larger size. Requires separate Qwen3 text encoder. ~6.6GB",
+    description="Z-Image Turbo quantized to GGUF Q8_0 format. Higher quality, larger size. Requires standalone Qwen3 text encoder and Flux VAE. ~6.6GB",
     type=ModelType.Main,
     format=ModelFormat.GGUFQuantized,
-    dependencies=[z_image_qwen3_encoder_quantized],
+    dependencies=[z_image_qwen3_encoder_quantized, flux_vae],
 )
 
 z_image_controlnet_union = StarterModel(
@@ -890,10 +890,19 @@ class StarterModelBundle(BaseModel):
     flux_krea_quantized,
 ]
 
+zimage_bundle: list[StarterModel] = [
+    z_image_turbo_quantized,
+    z_image_qwen3_encoder_quantized,
+    z_image_controlnet_union,
+    z_image_controlnet_tile,
+    flux_vae,
+]
+
 STARTER_BUNDLES: dict[str, StarterModelBundle] = {
     BaseModelType.StableDiffusion1: StarterModelBundle(name="Stable Diffusion 1.5", models=sd1_bundle),
     BaseModelType.StableDiffusionXL: StarterModelBundle(name="SDXL", models=sdxl_bundle),
     BaseModelType.Flux: StarterModelBundle(name="FLUX.1 dev", models=flux_bundle),
+    BaseModelType.ZImage: StarterModelBundle(name="Z-Image Turbo", models=zimage_bundle),
 }
 
 assert len(STARTER_MODELS) == len({m.source for m in STARTER_MODELS}), "Duplicate starter models"
diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json
index 948a858b8d9..881d7253270 100644
--- a/invokeai/frontend/web/public/locales/en.json
+++ b/invokeai/frontend/web/public/locales/en.json
@@ -1049,10 +1049,10 @@
     "t5Encoder": "T5 Encoder",
     "qwen3Encoder": "Qwen3 Encoder",
     "zImageVae": "VAE (optional)",
-    "zImageVaePlaceholder": "From Qwen3 Source (leave empty)",
+    "zImageVaePlaceholder": "From VAE source model",
     "zImageQwen3Encoder": "Qwen3 Encoder (optional)",
-    "zImageQwen3EncoderPlaceholder": "From Qwen3 Source (leave empty)",
-    "zImageQwen3Source": "Qwen3 Source (Diffusers Z-Image)",
+    "zImageQwen3EncoderPlaceholder": "From Qwen3 source model",
+    "zImageQwen3Source": "Qwen3 & VAE Source Model",
     "zImageQwen3SourcePlaceholder": "Required if VAE/Encoder empty",
     "upcastAttention": "Upcast Attention",
     "uploadImage": "Upload Image",
diff --git a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/InstallModels.tsx b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/InstallModels.tsx
index eed27c5fd60..9039c0f85f4 100644
--- a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/InstallModels.tsx
+++ b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/InstallModels.tsx
@@ -1,11 +1,11 @@
 import type { SystemStyleObject } from '@invoke-ai/ui-library';
-import { Box, Button, Flex, Heading, Tab, TabList, TabPanel, TabPanels, Tabs, Text } from '@invoke-ai/ui-library';
+import { Box, Flex, Heading, Tab, TabList, TabPanel, TabPanels, Tabs } from '@invoke-ai/ui-library';
 import { useStore } from '@nanostores/react';
 import { $installModelsTabIndex } from 'features/modelManagerV2/store/installModelsStore';
 import { StarterModelsForm } from 'features/modelManagerV2/subpanels/AddModelPanel/StarterModels/StarterModelsForm';
-import { memo, useCallback } from 'react';
+import { memo } from 'react';
 import { useTranslation } from 'react-i18next';
-import { PiCubeBold, PiFolderOpenBold, PiInfoBold, PiLinkSimpleBold, PiShootingStarBold } from 'react-icons/pi';
+import { PiCubeBold, PiFolderOpenBold, PiLinkSimpleBold, PiShootingStarBold } from 'react-icons/pi';
 import { SiHuggingface } from 'react-icons/si';
 
 import { HuggingFaceForm } from './AddModelPanel/HuggingFaceFolder/HuggingFaceForm';
@@ -24,17 +24,21 @@ export const InstallModels = memo(() => {
   const { t } = useTranslation();
   const tabIndex = useStore($installModelsTabIndex);
 
-  const onClickLearnMore = useCallback(() => {
-    window.open('https://support.invoke.ai/support/solutions/articles/151000170961-supported-models');
-  }, []);
+  // TODO: This click target points to an out-of-date support.invoke.ai URL. Reinstate when there is an updated link.
+  // const onClickLearnMore = useCallback(() => {
+  //   window.open('https://support.invoke.ai/support/solutions/articles/151000170961-supported-models');
+  // }, []);
 
   return (
     <Flex layerStyle="body" w="full" h="full" flexDir="column" gap={2}>
       <Heading size="md">{t('modelManager.addModel')}</Heading>
-      <Button size="sm" variant="link" leftIcon={<PiInfoBold />} onClick={onClickLearnMore}>
-        <Text>{t('modelManager.learnMoreAboutSupportedModels')}</Text>
-      </Button>
+      {/* <Button size="sm" variant="link" leftIcon={<PiInfoBold />} onClick={onClickLearnMore}>
+        <Text>{t('modelManager.learnMoreAboutSupportedModels')}</Text>
+      </Button> */}