
Commit 8785d9a

chore: fix ruff checks
1 parent 1e72feb · commit 8785d9a

8 files changed: +26 additions, −31 deletions
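Every change below is mechanical cleanup of the kind ruff produces: import blocks re-sorted and over-wrapped expressions joined or re-wrapped. The diffs are consistent with ruff's import-sorting lint (I001) plus the ruff formatter, so running "ruff check --fix ." followed by "ruff format ." should reproduce them (this assumes ruff is configured in the repo's pyproject.toml; the joined lines exceed ruff's default 88-character limit, so the project presumably sets a larger line length such as 120).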

invokeai/app/invocations/z_image_denoise.py

Lines changed: 1 addition & 1 deletion

@@ -18,7 +18,7 @@
     WithMetadata,
     ZImageConditioningField,
 )
-from invokeai.app.invocations.model import LoRAField, TransformerField
+from invokeai.app.invocations.model import TransformerField
 from invokeai.app.invocations.primitives import LatentsOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelFormat

invokeai/app/invocations/z_image_latents_to_image.py

Lines changed: 1 addition & 3 deletions

@@ -55,9 +55,7 @@ def invoke(self, context: InvocationContext) -> ImageOutput:
 
         # FLUX VAE doesn't support seamless, so only apply for AutoencoderKL
         seamless_context = (
-            nullcontext()
-            if is_flux_vae
-            else SeamlessExt.static_patch_model(vae_info.model, self.vae.seamless_axes)
+            nullcontext() if is_flux_vae else SeamlessExt.static_patch_model(vae_info.model, self.vae.seamless_axes)
         )
 
         with seamless_context, vae_info.model_on_device() as (_, vae):

invokeai/backend/model_manager/configs/factory.py

Lines changed: 5 additions & 5 deletions

@@ -72,17 +72,17 @@
     Main_GGUF_ZImage_Config,
     MainModelDefaultSettings,
 )
+from invokeai.backend.model_manager.configs.qwen3_encoder import (
+    Qwen3Encoder_Checkpoint_Config,
+    Qwen3Encoder_GGUF_Config,
+    Qwen3Encoder_Qwen3Encoder_Config,
+)
 from invokeai.backend.model_manager.configs.siglip import SigLIP_Diffusers_Config
 from invokeai.backend.model_manager.configs.spandrel import Spandrel_Checkpoint_Config
 from invokeai.backend.model_manager.configs.t2i_adapter import (
     T2IAdapter_Diffusers_SD1_Config,
     T2IAdapter_Diffusers_SDXL_Config,
 )
-from invokeai.backend.model_manager.configs.qwen3_encoder import (
-    Qwen3Encoder_Checkpoint_Config,
-    Qwen3Encoder_GGUF_Config,
-    Qwen3Encoder_Qwen3Encoder_Config,
-)
 from invokeai.backend.model_manager.configs.t5_encoder import T5Encoder_BnBLLMint8_Config, T5Encoder_T5Encoder_Config
 from invokeai.backend.model_manager.configs.textual_inversion import (
     TI_File_SD1_Config,
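The qwen3_encoder block only moves: ruff's I001 keeps each import group alphabetized by module path, and "q" sorts before "s" and "t". A runnable sketch of the ordering rule (module names taken from the diff; the snippet itself is illustrative):

    # isort/ruff orders modules within a group roughly lexicographically:
    modules = [
        "invokeai.backend.model_manager.configs.siglip",
        "invokeai.backend.model_manager.configs.qwen3_encoder",
        "invokeai.backend.model_manager.configs.t5_encoder",
    ]
    # qwen3_encoder < siglip < t5_encoder, matching the corrected file
    print("\n".join(sorted(modules)))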

invokeai/backend/model_manager/configs/qwen3_encoder.py

Lines changed: 1 addition & 1 deletion

@@ -3,7 +3,6 @@
 from pydantic import Field
 
 from invokeai.backend.model_manager.configs.base import Checkpoint_Config_Base, Config_Base
-from invokeai.backend.quantization.gguf.ggml_tensor import GGMLTensor
 from invokeai.backend.model_manager.configs.identification_utils import (
     NotAMatchError,
     raise_for_class_name,
@@ -13,6 +12,7 @@
 )
 from invokeai.backend.model_manager.model_on_disk import ModelOnDisk
 from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelFormat, ModelType
+from invokeai.backend.quantization.gguf.ggml_tensor import GGMLTensor
 
 
 def _has_qwen3_keys(state_dict: dict[str | int, Any]) -> bool:

invokeai/backend/model_manager/load/model_cache/torch_module_autocast/custom_modules/custom_diffusers_rms_norm.py

Lines changed: 0 additions & 1 deletion

@@ -1,5 +1,4 @@
 import torch
-
 from diffusers.models.normalization import RMSNorm as DiffusersRMSNorm
 
 from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.cast_to_device import cast_to_device
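This one-line deletion reflects the grouping half of the same rule: blank lines separate import groups (stdlib, third-party, first-party) but are not allowed inside a group, and torch and diffusers are both third-party. A small sketch of the layout I001 settles on (assuming the default section order):

    import os  # stdlib group

    import torch  # third-party group starts after one blank line
    from diffusers.models.normalization import RMSNorm  # same group: no blank line above

    from invokeai.backend.util.logging import InvokeAILogger  # first-party group, separated again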

invokeai/backend/model_manager/load/model_loaders/z_image.py

Lines changed: 6 additions & 7 deletions

@@ -6,7 +6,6 @@
 
 import accelerate
 import torch
-
 from transformers import AutoTokenizer, Qwen3ForCausalLM
 
 from invokeai.backend.model_manager.configs.base import Checkpoint_Config_Base, Diffusers_Config_Base
@@ -287,8 +286,7 @@ def _load_from_singlefile(
 
         if not isinstance(config, Main_GGUF_ZImage_Config):
             raise TypeError(
-                f"Expected Main_GGUF_ZImage_Config, got {type(config).__name__}. "
-                "Model configuration type mismatch."
+                f"Expected Main_GGUF_ZImage_Config, got {type(config).__name__}. Model configuration type mismatch."
             )
         model_path = Path(config.path)
 
@@ -405,10 +403,11 @@ def _load_from_singlefile(
         self,
         config: AnyModelConfig,
     ) -> AnyModel:
-        from invokeai.backend.util.logging import InvokeAILogger
         from safetensors.torch import load_file
         from transformers import Qwen3Config, Qwen3ForCausalLM
 
+        from invokeai.backend.util.logging import InvokeAILogger
+
         logger = InvokeAILogger.get_logger(self.__class__.__name__)
 
         if not isinstance(config, Qwen3Encoder_Checkpoint_Config):
@@ -564,15 +563,15 @@ def _load_from_gguf(
         self,
         config: AnyModelConfig,
     ) -> AnyModel:
-        from invokeai.backend.util.logging import InvokeAILogger
         from transformers import Qwen3Config, Qwen3ForCausalLM
 
+        from invokeai.backend.util.logging import InvokeAILogger
+
         logger = InvokeAILogger.get_logger(self.__class__.__name__)
 
         if not isinstance(config, Qwen3Encoder_GGUF_Config):
             raise TypeError(
-                f"Expected Qwen3Encoder_GGUF_Config, got {type(config).__name__}. "
-                "Model configuration type mismatch."
+                f"Expected Qwen3Encoder_GGUF_Config, got {type(config).__name__}. Model configuration type mismatch."
             )
         model_path = Path(config.path)

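The two loader hunks apply the same sorting inside function bodies: a contiguous local import block is grouped like a top-level one, so the first-party InvokeAILogger import moves below the third-party safetensors/transformers imports, with a blank line between the groups. A stripped-down sketch (the function here is hypothetical, not from the file):

    def _load_sketch() -> None:
        # Local imports get the same section ordering as module-level ones.
        from safetensors.torch import load_file  # third-party
        from transformers import Qwen3Config  # third-party

        from invokeai.backend.util.logging import InvokeAILogger  # first-party

        logger = InvokeAILogger.get_logger("sketch")
        logger.info("sorted local imports: %s, %s", load_file.__name__, Qwen3Config.__name__)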
invokeai/backend/patches/layer_patcher.py

Lines changed: 1 addition & 3 deletions

@@ -175,9 +175,7 @@ def _apply_model_layer_patch(
 
         # TODO(ryand): Using torch.autocast(...) over explicit casting may offer a speed benefit on CUDA
         # devices here. Experimentally, it was found to be very slow on CPU. More investigation needed.
-        params_dict = patch.get_parameters(
-            dict(module_to_patch.named_parameters(recurse=False)), weight=patch_weight
-        )
+        params_dict = patch.get_parameters(dict(module_to_patch.named_parameters(recurse=False)), weight=patch_weight)
         if not params_dict:
             logger = InvokeAILogger.get_logger(LayerPatcher.__name__)
             logger.warning(f"LoRA patch returned no parameters for module: {module_to_patch_key}")

invokeai/backend/patches/lora_conversions/z_image_lora_conversion_utils.py

Lines changed: 11 additions & 10 deletions

@@ -28,17 +28,18 @@ def is_state_dict_likely_z_image_lora(state_dict: dict[str | int, torch.Tensor])
     # Check for Z-Image transformer keys (S3-DiT architecture)
     # Various training frameworks use different prefixes
     has_transformer_keys = any(
-        k.startswith((
-            "transformer.",
-            "base_model.model.transformer.",
-            "diffusion_model.",
-        )) for k in str_keys
+        k.startswith(
+            (
+                "transformer.",
+                "base_model.model.transformer.",
+                "diffusion_model.",
+            )
+        )
+        for k in str_keys
     )
 
     # Check for Qwen3 text encoder keys
-    has_qwen3_keys = any(
-        k.startswith(("text_encoder.", "base_model.model.text_encoder.")) for k in str_keys
-    )
+    has_qwen3_keys = any(k.startswith(("text_encoder.", "base_model.model.text_encoder.")) for k in str_keys)
 
     return has_transformer_keys or has_qwen3_keys
 
@@ -96,15 +97,15 @@ def lora_model_from_z_image_state_dict(
         # Check and strip text encoder prefixes first
         for prefix in text_encoder_prefixes:
             if layer_key.startswith(prefix):
-                clean_key = layer_key[len(prefix):]
+                clean_key = layer_key[len(prefix) :]
                 is_text_encoder = True
                 break
 
         # If not text encoder, check transformer prefixes
         if not is_text_encoder:
             for prefix in transformer_prefixes:
                 if layer_key.startswith(prefix):
-                    clean_key = layer_key[len(prefix):]
+                    clean_key = layer_key[len(prefix) :]
                     break
 
         # Apply the appropriate internal prefix
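Two formatter behaviors meet in this file. The prefix tuple stays one element per line because of its trailing comma (ruff format, like Black, treats a magic trailing comma as a request to keep the collection expanded), while the has_qwen3_keys call collapses because it fits within the configured line length. The slices gain a space before the colon because their lower bound is a compound expression. A runnable sketch of the slice-spacing rule:

    # Black/ruff-format slice spacing: compound bounds get symmetric spaces
    # around the colon; simple names and literals do not.
    key = "text_encoder.layer.0.weight"
    prefix = "text_encoder."
    clean_key = key[len(prefix) :]  # compound bound -> space before ":"
    head = key[:12]                 # simple bound -> no spaces
    print(clean_key, head)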
