
Commit 56fd7bc

docs(z-image) add Z-Image requirements and starter bundle (#8734)
* docs(z-image) add minimum requirements for Z-Image and create Z-Image starter bundle
* fix(model manager) add flux VAE to Z-Image bundle
* docs(model manager) remove out-of-date model info link
* chore: fix frontend checks
* chore: lint:prettier
* docs(model manager): clarify minimum hardware for z-image turbo
* (fix) add flux VAE to ZIT starter dependencies & tweak UI docs
Parent: 9f8f996

7 files changed: 41 additions, 15 deletions


docs/installation/quick_start.md

Lines changed: 6 additions & 0 deletions
@@ -31,6 +31,12 @@ Hardware requirements vary significantly depending on model and image output siz
     - Memory: At least 32GB RAM.
     - Disk: 10GB for base installation plus 200GB for models.
 
+=== "Z-Image Turbo - 1024x1024"
+    - GPU: Nvidia 20xx series or later, 8GB+ VRAM for the Q4_K quantized model. 16GB+ needed for the Q8 or BF16 models.
+    - Memory: At least 16GB RAM.
+    - Disk: 10GB for base installation plus 35GB for models.
+
+
 More detail on system requirements can be found [here](./requirements.md).
 
 ## Step 2: Download and Set Up the Launcher

docs/installation/requirements.md

Lines changed: 5 additions & 0 deletions
@@ -31,6 +31,11 @@ The requirements below are rough guidelines for best performance. GPUs with less
     - Memory: At least 32GB RAM.
     - Disk: 10GB for base installation plus 200GB for models.
 
+=== "Z-Image Turbo - 1024x1024"
+    - GPU: Nvidia 20xx series or later, 8GB+ VRAM for the Q4_K quantized model. 16GB+ needed for the Q8 or BF16 models.
+    - Memory: At least 16GB RAM.
+    - Disk: 10GB for base installation plus 35GB for models.
+
 !!! info "`tmpfs` on Linux"
 
     If your temporary directory is mounted as a `tmpfs`, ensure it has sufficient space.

invokeai/backend/model_manager/load/load_default.py

Lines changed: 1 addition & 0 deletions
@@ -75,6 +75,7 @@ def _load_and_cache(self, config: AnyModelConfig, submodel_type: Optional[SubMod
 
         config.path = str(self._get_model_path(config))
         self._ram_cache.make_room(self.get_size_fs(config, Path(config.path), submodel_type))
+        self._logger.info(f"Loading model '{stats_name}' into RAM cache..., config={config}")
         loaded_model = self._load_model(config, submodel_type)
 
         self._ram_cache.put(

invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py

Lines changed: 1 addition & 0 deletions
@@ -140,6 +140,7 @@ def _load_from_singlefile(
         # Some weights of the model checkpoint were not used when initializing CLIPTextModelWithProjection:
         # ['text_model.embeddings.position_ids']
 
+        self._logger.info(f"Loading model from single file at {config.path} using {load_class.__name__}")
         with SilenceWarnings():
             pipeline = load_class.from_single_file(config.path, torch_dtype=self._torch_dtype)
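
Both backend changes above follow the same pattern: emit an informational log line right before a potentially slow model load, so the console shows progress instead of appearing hung while a multi-gigabyte checkpoint is read. Below is a minimal, self-contained sketch of that pattern using only Python's standard `logging` module; the function names and the simulated load step are illustrative stand-ins, not InvokeAI's actual loader API.

```python
import logging
import time
from pathlib import Path

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("model_manager")


def read_checkpoint(path: Path) -> bytes:
    # Stand-in for the real work: reading a multi-gigabyte GGUF or safetensors file.
    time.sleep(0.1)
    return b"fake weights"


def load_model(path: Path) -> bytes:
    # Log *before* the slow step, mirroring the two log lines added in this commit.
    logger.info(f"Loading model from {path}...")
    weights = read_checkpoint(path)
    logger.info(f"Loaded {len(weights)} bytes from {path}")
    return weights


if __name__ == "__main__":
    load_model(Path("z_image_turbo-Q4_K.gguf"))
```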

invokeai/backend/model_manager/starter_models.py

Lines changed: 13 additions & 4 deletions
@@ -720,20 +720,20 @@ class StarterModelBundle(BaseModel):
     name="Z-Image Turbo (quantized)",
     base=BaseModelType.ZImage,
     source="https://huggingface.co/leejet/Z-Image-Turbo-GGUF/resolve/main/z_image_turbo-Q4_K.gguf",
-    description="Z-Image Turbo quantized to GGUF Q4_K format. Requires separate Qwen3 text encoder. ~4GB",
+    description="Z-Image Turbo quantized to GGUF Q4_K format. Requires standalone Qwen3 text encoder and Flux VAE. ~4GB",
     type=ModelType.Main,
     format=ModelFormat.GGUFQuantized,
-    dependencies=[z_image_qwen3_encoder_quantized],
+    dependencies=[z_image_qwen3_encoder_quantized, flux_vae],
 )
 
 z_image_turbo_q8 = StarterModel(
     name="Z-Image Turbo (Q8)",
     base=BaseModelType.ZImage,
     source="https://huggingface.co/leejet/Z-Image-Turbo-GGUF/resolve/main/z_image_turbo-Q8_0.gguf",
-    description="Z-Image Turbo quantized to GGUF Q8_0 format. Higher quality, larger size. Requires separate Qwen3 text encoder. ~6.6GB",
+    description="Z-Image Turbo quantized to GGUF Q8_0 format. Higher quality, larger size. Requires standalone Qwen3 text encoder and Flux VAE. ~6.6GB",
     type=ModelType.Main,
     format=ModelFormat.GGUFQuantized,
-    dependencies=[z_image_qwen3_encoder_quantized],
+    dependencies=[z_image_qwen3_encoder_quantized, flux_vae],
 )
 
 z_image_controlnet_union = StarterModel(
@@ -890,10 +890,19 @@ class StarterModelBundle(BaseModel):
     flux_krea_quantized,
 ]
 
+zimage_bundle: list[StarterModel] = [
+    z_image_turbo_quantized,
+    z_image_qwen3_encoder_quantized,
+    z_image_controlnet_union,
+    z_image_controlnet_tile,
+    flux_vae,
+]
+
 STARTER_BUNDLES: dict[str, StarterModelBundle] = {
     BaseModelType.StableDiffusion1: StarterModelBundle(name="Stable Diffusion 1.5", models=sd1_bundle),
     BaseModelType.StableDiffusionXL: StarterModelBundle(name="SDXL", models=sdxl_bundle),
     BaseModelType.Flux: StarterModelBundle(name="FLUX.1 dev", models=flux_bundle),
+    BaseModelType.ZImage: StarterModelBundle(name="Z-Image Turbo", models=zimage_bundle),
 }
 
 assert len(STARTER_MODELS) == len({m.source for m in STARTER_MODELS}), "Duplicate starter models"
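
To see why `flux_vae` joins `dependencies` here, a minimal sketch of how a starter bundle's dependency list is expected to resolve: selecting only the quantized main model should still pull in the Qwen3 encoder and the Flux VAE. The `StarterModel` stand-in, the placeholder sources, and the `collect_with_dependencies` helper are illustrative assumptions, not the real Pydantic models or installer logic in `starter_models.py`.

```python
from dataclasses import dataclass, field


# Illustrative stand-in for the real Pydantic StarterModel; only the fields
# needed to demonstrate dependency resolution are modeled here.
@dataclass
class StarterModel:
    name: str
    source: str
    dependencies: list["StarterModel"] = field(default_factory=list)


# Sources other than the GGUF URL are placeholders, not real repository paths.
flux_vae = StarterModel(name="FLUX VAE", source="placeholder/flux-vae")
qwen3_encoder_quantized = StarterModel(
    name="Qwen3 Encoder (quantized)", source="placeholder/qwen3-encoder-gguf"
)

z_image_turbo_quantized = StarterModel(
    name="Z-Image Turbo (quantized)",
    source="https://huggingface.co/leejet/Z-Image-Turbo-GGUF/resolve/main/z_image_turbo-Q4_K.gguf",
    dependencies=[qwen3_encoder_quantized, flux_vae],  # flux_vae added by this commit
)


def collect_with_dependencies(models: list[StarterModel]) -> list[StarterModel]:
    """Flatten a selection so each model and its dependencies appear exactly once."""
    seen: dict[str, StarterModel] = {}
    for model in models:
        for entry in (model, *model.dependencies):
            seen.setdefault(entry.source, entry)
    return list(seen.values())


# Selecting only the main model still yields the encoder and VAE it needs.
for m in collect_with_dependencies([z_image_turbo_quantized]):
    print(m.name)
# Z-Image Turbo (quantized)
# Qwen3 Encoder (quantized)
# FLUX VAE
```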

invokeai/frontend/web/public/locales/en.json

Lines changed: 3 additions & 3 deletions
@@ -1049,10 +1049,10 @@
     "t5Encoder": "T5 Encoder",
     "qwen3Encoder": "Qwen3 Encoder",
     "zImageVae": "VAE (optional)",
-    "zImageVaePlaceholder": "From Qwen3 Source (leave empty)",
+    "zImageVaePlaceholder": "From VAE source model",
     "zImageQwen3Encoder": "Qwen3 Encoder (optional)",
-    "zImageQwen3EncoderPlaceholder": "From Qwen3 Source (leave empty)",
-    "zImageQwen3Source": "Qwen3 Source (Diffusers Z-Image)",
+    "zImageQwen3EncoderPlaceholder": "From Qwen3 source model",
+    "zImageQwen3Source": "Qwen3 & VAE Source Model",
     "zImageQwen3SourcePlaceholder": "Required if VAE/Encoder empty",
     "upcastAttention": "Upcast Attention",
     "uploadImage": "Upload Image",

invokeai/frontend/web/src/features/modelManagerV2/subpanels/InstallModels.tsx

Lines changed: 12 additions & 8 deletions
@@ -1,11 +1,11 @@
 import type { SystemStyleObject } from '@invoke-ai/ui-library';
-import { Box, Button, Flex, Heading, Tab, TabList, TabPanel, TabPanels, Tabs, Text } from '@invoke-ai/ui-library';
+import { Box, Flex, Heading, Tab, TabList, TabPanel, TabPanels, Tabs } from '@invoke-ai/ui-library';
 import { useStore } from '@nanostores/react';
 import { $installModelsTabIndex } from 'features/modelManagerV2/store/installModelsStore';
 import { StarterModelsForm } from 'features/modelManagerV2/subpanels/AddModelPanel/StarterModels/StarterModelsForm';
-import { memo, useCallback } from 'react';
+import { memo } from 'react';
 import { useTranslation } from 'react-i18next';
-import { PiCubeBold, PiFolderOpenBold, PiInfoBold, PiLinkSimpleBold, PiShootingStarBold } from 'react-icons/pi';
+import { PiCubeBold, PiFolderOpenBold, PiLinkSimpleBold, PiShootingStarBold } from 'react-icons/pi';
 import { SiHuggingface } from 'react-icons/si';
 
 import { HuggingFaceForm } from './AddModelPanel/HuggingFaceFolder/HuggingFaceForm';
@@ -24,17 +24,21 @@ export const InstallModels = memo(() => {
   const { t } = useTranslation();
   const tabIndex = useStore($installModelsTabIndex);
 
-  const onClickLearnMore = useCallback(() => {
-    window.open('https://support.invoke.ai/support/solutions/articles/151000170961-supported-models');
-  }, []);
+  {
+    /* TO DO: This click target points to an out-of-date invokeai.ai URL. Reinstate when there is an updated web link. */
+  }
+  // const onClickLearnMore = useCallback(() => {
+  //   window.open('https://support.invoke.ai/support/solutions/articles/151000170961-supported-models');
+  // }, []);
 
   return (
     <Flex layerStyle="first" borderRadius="base" w="full" h="full" flexDir="column" gap={4}>
       <Flex alignItems="center" justifyContent="space-between">
         <Heading fontSize="xl">{t('modelManager.addModel')}</Heading>
-        <Button alignItems="center" variant="link" leftIcon={<PiInfoBold />} onClick={onClickLearnMore}>
+        {/* TO DO: This button points to an out-of-date invokeai.ai URL. Reinstate when there is an updated web link. */}
+        {/* <Button alignItems="center" variant="link" leftIcon={<PiInfoBold />} onClick={onClickLearnMore}>
           <Text variant="subtext">{t('modelManager.learnMoreAboutSupportedModels')}</Text>
-        </Button>
+        </Button> */}
       </Flex>
       <Tabs
         variant="line"
