
Commit 95a103f

setback
1 parent 84115dc commit 95a103f

31 files changed: +91 −128 lines changed

src/diffusers/loaders/ip_adapter.py

Lines changed: 4 additions & 2 deletions
@@ -292,7 +292,8 @@ def set_ip_adapter_scale(self, scale):
             ):
                 if len(scale_configs) != len(attn_processor.scale):
                     raise ValueError(
-                        f"Cannot assign {len(scale_configs)} scale_configs to {len(attn_processor.scale)} IP-Adapter."
+                        f"Cannot assign {len(scale_configs)} scale_configs to "
+                        f"{len(attn_processor.scale)} IP-Adapter."
                     )
                 elif len(scale_configs) == 1:
                     scale_configs = scale_configs * len(attn_processor.scale)
@@ -591,7 +592,8 @@ def LinearStrengthModel(start, finish, size):
         if isinstance(attn_processor, (FluxIPAdapterJointAttnProcessor2_0)):
             if len(scale_configs) != len(attn_processor.scale):
                 raise ValueError(
-                    f"Cannot assign {len(scale_configs)} scale_configs to {len(attn_processor.scale)} IP-Adapter."
+                    f"Cannot assign {len(scale_configs)} scale_configs to "
+                    f"{len(attn_processor.scale)} IP-Adapter."
                 )
             elif len(scale_configs) == 1:
                 scale_configs = scale_configs * len(attn_processor.scale)
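Note: the replacement builds the error message from two adjacent f-string literals inside the call parentheses. Python concatenates adjacent string literals at compile time, so the raised message is identical to the single-literal form. A minimal sketch, using made-up stand-ins for scale_configs and attn_processor.scale:

    # Adjacent f-string literals inside parentheses are joined at compile time,
    # so both layouts raise exactly the same message.
    scale_configs = [0.5, 0.7, 0.9]  # stand-in values for illustration
    attn_scale = [1.0, 1.0]          # stand-in for attn_processor.scale

    single = f"Cannot assign {len(scale_configs)} scale_configs to {len(attn_scale)} IP-Adapter."
    split = (
        f"Cannot assign {len(scale_configs)} scale_configs to "
        f"{len(attn_scale)} IP-Adapter."
    )
    assert single == split  # same string, only the source layout differs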

src/diffusers/loaders/lora_conversion_utils.py

Lines changed: 33 additions & 33 deletions
@@ -177,9 +177,9 @@ def _convert_non_diffusers_lora_to_diffusers(state_dict, unet_name="unet", text_
             # Store DoRA scale if present.
             if dora_present_in_unet:
                 dora_scale_key_to_replace = "_lora.down." if "_lora.down." in diffusers_name else ".lora.down."
-                unet_state_dict[diffusers_name.replace(dora_scale_key_to_replace, ".lora_magnitude_vector.")] = (
-                    state_dict.pop(key.replace("lora_down.weight", "dora_scale"))
-                )
+                unet_state_dict[
+                    diffusers_name.replace(dora_scale_key_to_replace, ".lora_magnitude_vector.")
+                ] = state_dict.pop(key.replace("lora_down.weight", "dora_scale"))

         # Handle text encoder LoRAs.
         elif lora_name.startswith(("lora_te_", "lora_te1_", "lora_te2_")):
@@ -199,13 +199,13 @@ def _convert_non_diffusers_lora_to_diffusers(state_dict, unet_name="unet", text_
                     "_lora.down." if "_lora.down." in diffusers_name else ".lora_linear_layer."
                 )
                 if lora_name.startswith(("lora_te_", "lora_te1_")):
-                    te_state_dict[diffusers_name.replace(dora_scale_key_to_replace_te, ".lora_magnitude_vector.")] = (
-                        state_dict.pop(key.replace("lora_down.weight", "dora_scale"))
-                    )
+                    te_state_dict[
+                        diffusers_name.replace(dora_scale_key_to_replace_te, ".lora_magnitude_vector.")
+                    ] = state_dict.pop(key.replace("lora_down.weight", "dora_scale"))
                 elif lora_name.startswith("lora_te2_"):
-                    te2_state_dict[diffusers_name.replace(dora_scale_key_to_replace_te, ".lora_magnitude_vector.")] = (
-                        state_dict.pop(key.replace("lora_down.weight", "dora_scale"))
-                    )
+                    te2_state_dict[
+                        diffusers_name.replace(dora_scale_key_to_replace_te, ".lora_magnitude_vector.")
+                    ] = state_dict.pop(key.replace("lora_down.weight", "dora_scale"))

             # Store alpha if present.
             if lora_name_alpha in state_dict:
@@ -684,21 +684,21 @@ def swap_scale_shift(weight):

     for lora_key in ["lora_A", "lora_B"]:
         ## time_text_embed.timestep_embedder <- time_in
-        converted_state_dict[f"time_text_embed.timestep_embedder.linear_1.{lora_key}.weight"] = (
-            original_state_dict.pop(f"time_in.in_layer.{lora_key}.weight")
-        )
+        converted_state_dict[
+            f"time_text_embed.timestep_embedder.linear_1.{lora_key}.weight"
+        ] = original_state_dict.pop(f"time_in.in_layer.{lora_key}.weight")
         if f"time_in.in_layer.{lora_key}.bias" in original_state_dict_keys:
-            converted_state_dict[f"time_text_embed.timestep_embedder.linear_1.{lora_key}.bias"] = (
-                original_state_dict.pop(f"time_in.in_layer.{lora_key}.bias")
-            )
+            converted_state_dict[
+                f"time_text_embed.timestep_embedder.linear_1.{lora_key}.bias"
+            ] = original_state_dict.pop(f"time_in.in_layer.{lora_key}.bias")

-        converted_state_dict[f"time_text_embed.timestep_embedder.linear_2.{lora_key}.weight"] = (
-            original_state_dict.pop(f"time_in.out_layer.{lora_key}.weight")
-        )
+        converted_state_dict[
+            f"time_text_embed.timestep_embedder.linear_2.{lora_key}.weight"
+        ] = original_state_dict.pop(f"time_in.out_layer.{lora_key}.weight")
         if f"time_in.out_layer.{lora_key}.bias" in original_state_dict_keys:
-            converted_state_dict[f"time_text_embed.timestep_embedder.linear_2.{lora_key}.bias"] = (
-                original_state_dict.pop(f"time_in.out_layer.{lora_key}.bias")
-            )
+            converted_state_dict[
+                f"time_text_embed.timestep_embedder.linear_2.{lora_key}.bias"
+            ] = original_state_dict.pop(f"time_in.out_layer.{lora_key}.bias")

         ## time_text_embed.text_embedder <- vector_in
         converted_state_dict[f"time_text_embed.text_embedder.linear_1.{lora_key}.weight"] = original_state_dict.pop(
@@ -720,21 +720,21 @@ def swap_scale_shift(weight):
         # guidance
         has_guidance = any("guidance" in k for k in original_state_dict)
         if has_guidance:
-            converted_state_dict[f"time_text_embed.guidance_embedder.linear_1.{lora_key}.weight"] = (
-                original_state_dict.pop(f"guidance_in.in_layer.{lora_key}.weight")
-            )
+            converted_state_dict[
+                f"time_text_embed.guidance_embedder.linear_1.{lora_key}.weight"
+            ] = original_state_dict.pop(f"guidance_in.in_layer.{lora_key}.weight")
             if f"guidance_in.in_layer.{lora_key}.bias" in original_state_dict_keys:
-                converted_state_dict[f"time_text_embed.guidance_embedder.linear_1.{lora_key}.bias"] = (
-                    original_state_dict.pop(f"guidance_in.in_layer.{lora_key}.bias")
-                )
+                converted_state_dict[
+                    f"time_text_embed.guidance_embedder.linear_1.{lora_key}.bias"
+                ] = original_state_dict.pop(f"guidance_in.in_layer.{lora_key}.bias")

-            converted_state_dict[f"time_text_embed.guidance_embedder.linear_2.{lora_key}.weight"] = (
-                original_state_dict.pop(f"guidance_in.out_layer.{lora_key}.weight")
-            )
+            converted_state_dict[
+                f"time_text_embed.guidance_embedder.linear_2.{lora_key}.weight"
+            ] = original_state_dict.pop(f"guidance_in.out_layer.{lora_key}.weight")
             if f"guidance_in.out_layer.{lora_key}.bias" in original_state_dict_keys:
-                converted_state_dict[f"time_text_embed.guidance_embedder.linear_2.{lora_key}.bias"] = (
-                    original_state_dict.pop(f"guidance_in.out_layer.{lora_key}.bias")
-                )
+                converted_state_dict[
+                    f"time_text_embed.guidance_embedder.linear_2.{lora_key}.bias"
+                ] = original_state_dict.pop(f"guidance_in.out_layer.{lora_key}.bias")

         # context_embedder
         converted_state_dict[f"context_embedder.{lora_key}.weight"] = original_state_dict.pop(

src/diffusers/models/model_loading_utils.py

Lines changed: 1 addition & 1 deletion
@@ -181,7 +181,7 @@ def load_state_dict(
            ) from e
        except (UnicodeDecodeError, ValueError):
            raise OSError(
-                f"Unable to load weights from checkpoint file for '{checkpoint_file}' at '{checkpoint_file}'. "
+                f"Unable to load weights from checkpoint file for '{checkpoint_file}' " f"at '{checkpoint_file}'. "
            )

src/diffusers/models/transformers/transformer_2d.py

Lines changed: 3 additions & 3 deletions
@@ -211,9 +211,9 @@ def _init_continuous_input(self, norm_type):

    def _init_vectorized_inputs(self, norm_type):
        assert self.config.sample_size is not None, "Transformer2DModel over discrete input must provide sample_size"
-        assert self.config.num_vector_embeds is not None, (
-            "Transformer2DModel over discrete input must provide num_embed"
-        )
+        assert (
+            self.config.num_vector_embeds is not None
+        ), "Transformer2DModel over discrete input must provide num_embed"

        self.height = self.config.sample_size
        self.width = self.config.sample_size
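Note: both the removed and the added form keep the condition and the message as separate operands of assert, so the check still fires when num_vector_embeds is missing. The layout to avoid is parenthesizing the whole pair, which builds a non-empty tuple and can never fail. A small illustration with a stand-in config object:

    from types import SimpleNamespace

    config = SimpleNamespace(num_vector_embeds=None)  # stand-in for self.config

    # The added layout: parentheses around the condition only, message after the comma.
    try:
        assert (
            config.num_vector_embeds is not None
        ), "Transformer2DModel over discrete input must provide num_embed"
    except AssertionError as err:
        print("raised as expected:", err)

    # Pitfall (not what this diff does): a parenthesized (condition, message) tuple
    # is always truthy, so this assert never fires; CPython warns about it.
    assert (config.num_vector_embeds is not None, "this never raises")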

src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py

Lines changed: 2 additions & 2 deletions
@@ -650,15 +650,15 @@ def check_inputs(
        if padding_mask_crop is not None:
            if not isinstance(image, PIL.Image.Image):
                raise ValueError(
-                    f"The image should be a PIL image when inpainting mask crop, but is of type {type(image)}."
+                    f"The image should be a PIL image when inpainting mask crop, but is of type" f" {type(image)}."
                )
            if not isinstance(mask_image, PIL.Image.Image):
                raise ValueError(
                    f"The mask image should be a PIL image when inpainting mask crop, but is of type"
                    f" {type(mask_image)}."
                )
            if output_type != "pil":
-                raise ValueError(f"The output type should be PIL when inpainting mask crop, but is {output_type}.")
+                raise ValueError(f"The output type should be PIL when inpainting mask crop, but is" f" {output_type}.")

        # `prompt` needs more sophisticated handling when there are multiple
        # conditionings.
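Note: when the literal is split on one line, the space that used to sit before {type(image)} has to travel with one of the fragments; here it leads the second fragment, so the rendered message keeps its spacing. A quick check with a stand-in value:

    image = "not-a-PIL-image"  # stand-in; real calls pass a PIL.Image.Image

    split = f"The image should be a PIL image when inpainting mask crop, but is of type" f" {type(image)}."
    joined = f"The image should be a PIL image when inpainting mask crop, but is of type {type(image)}."
    assert split == joined  # the separating space travels with the second fragment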

src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py

Lines changed: 3 additions & 3 deletions
@@ -743,15 +743,15 @@ def check_inputs(
        if padding_mask_crop is not None:
            if not isinstance(image, PIL.Image.Image):
                raise ValueError(
-                    f"The image should be a PIL image when inpainting mask crop, but is of type {type(image)}."
+                    f"The image should be a PIL image when inpainting mask crop, but is of type" f" {type(image)}."
                )
            if not isinstance(mask_image, PIL.Image.Image):
                raise ValueError(
                    f"The mask image should be a PIL image when inpainting mask crop, but is of type"
                    f" {type(mask_image)}."
                )
            if output_type != "pil":
-                raise ValueError(f"The output type should be PIL when inpainting mask crop, but is {output_type}.")
+                raise ValueError(f"The output type should be PIL when inpainting mask crop, but is" f" {output_type}.")

        if prompt_embeds is not None and pooled_prompt_embeds is None:
            raise ValueError(
@@ -1644,7 +1644,7 @@ def denoising_value_valid(dnv):
                    f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
                    f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
                    f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
-                    f" = {num_channels_latents + num_channels_masked_image + num_channels_mask}. Please verify the config of"
+                    f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
                    " `pipeline.unet` or your `mask_image` or `image` input."
                )
            elif num_channels_unet != 4:
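Note: the second hunk only removes the spaces around + inside the f-string replacement field; the expression is evaluated the same way, so the rendered sum is unchanged. A sketch with made-up channel counts:

    # Made-up channel counts for illustration only.
    num_channels_latents, num_channels_mask, num_channels_masked_image = 4, 1, 4

    with_spaces = f" = {num_channels_latents + num_channels_masked_image + num_channels_mask}."
    without_spaces = f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}."
    assert with_spaces == without_spaces == " = 9."

The same check_inputs reformat recurs in the inpaint pipelines that follow.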

src/diffusers/pipelines/controlnet/pipeline_controlnet_union_inpaint_sd_xl.py

Lines changed: 2 additions & 2 deletions
@@ -726,15 +726,15 @@ def check_inputs(
        if padding_mask_crop is not None:
            if not isinstance(image, PIL.Image.Image):
                raise ValueError(
-                    f"The image should be a PIL image when inpainting mask crop, but is of type {type(image)}."
+                    f"The image should be a PIL image when inpainting mask crop, but is of type" f" {type(image)}."
                )
            if not isinstance(mask_image, PIL.Image.Image):
                raise ValueError(
                    f"The mask image should be a PIL image when inpainting mask crop, but is of type"
                    f" {type(mask_image)}."
                )
            if output_type != "pil":
-                raise ValueError(f"The output type should be PIL when inpainting mask crop, but is {output_type}.")
+                raise ValueError(f"The output type should be PIL when inpainting mask crop, but is" f" {output_type}.")

        if prompt_embeds is not None and pooled_prompt_embeds is None:
            raise ValueError(

src/diffusers/pipelines/flux/pipeline_flux_controlnet_inpainting.py

Lines changed: 2 additions & 2 deletions
@@ -507,15 +507,15 @@ def check_inputs(
        if padding_mask_crop is not None:
            if not isinstance(image, PIL.Image.Image):
                raise ValueError(
-                    f"The image should be a PIL image when inpainting mask crop, but is of type {type(image)}."
+                    f"The image should be a PIL image when inpainting mask crop, but is of type" f" {type(image)}."
                )
            if not isinstance(mask_image, PIL.Image.Image):
                raise ValueError(
                    f"The mask image should be a PIL image when inpainting mask crop, but is of type"
                    f" {type(mask_image)}."
                )
            if output_type != "pil":
-                raise ValueError(f"The output type should be PIL when inpainting mask crop, but is {output_type}.")
+                raise ValueError(f"The output type should be PIL when inpainting mask crop, but is" f" {output_type}.")

        if max_sequence_length is not None and max_sequence_length > 512:
            raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}")

src/diffusers/pipelines/flux/pipeline_flux_inpaint.py

Lines changed: 2 additions & 2 deletions
@@ -485,15 +485,15 @@ def check_inputs(
        if padding_mask_crop is not None:
            if not isinstance(image, PIL.Image.Image):
                raise ValueError(
-                    f"The image should be a PIL image when inpainting mask crop, but is of type {type(image)}."
+                    f"The image should be a PIL image when inpainting mask crop, but is of type" f" {type(image)}."
                )
            if not isinstance(mask_image, PIL.Image.Image):
                raise ValueError(
                    f"The mask image should be a PIL image when inpainting mask crop, but is of type"
                    f" {type(mask_image)}."
                )
            if output_type != "pil":
-                raise ValueError(f"The output type should be PIL when inpainting mask crop, but is {output_type}.")
+                raise ValueError(f"The output type should be PIL when inpainting mask crop, but is" f" {output_type}.")

        if max_sequence_length is not None and max_sequence_length > 512:
            raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}")

src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py

Lines changed: 1 addition & 1 deletion
@@ -360,7 +360,7 @@ class KandinskyImg2ImgCombinedPipeline(DiffusionPipeline):
    """

    _load_connected_pipes = True
-    model_cpu_offload_seq = "prior_text_encoder->prior_image_encoder->prior_prior->text_encoder->unet->movq"
+    model_cpu_offload_seq = "prior_text_encoder->prior_image_encoder->prior_prior->" "text_encoder->unet->movq"
    _exclude_from_cpu_offload = ["prior_prior"]

    def __init__(
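Note: the offload sequence is now written as two adjacent plain string literals; they are concatenated at compile time, so model_cpu_offload_seq holds the same single arrow-separated string as before. A minimal sketch:

    class _OffloadSketch:
        # Adjacent literals are joined at compile time, as in the pipeline class attribute.
        model_cpu_offload_seq = "prior_text_encoder->prior_image_encoder->prior_prior->" "text_encoder->unet->movq"

    assert _OffloadSketch.model_cpu_offload_seq == (
        "prior_text_encoder->prior_image_encoder->prior_prior->text_encoder->unet->movq"
    )
    assert _OffloadSketch.model_cpu_offload_seq.count("->") == 5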
