
Commit 38022f1

Merge branch 'main' into load-lora-adapter

2 parents f20761f + 243d9a4

14 files changed: +154 −75 lines

src/diffusers/loaders/ip_adapter.py (1 addition, 1 deletion)

@@ -187,7 +187,7 @@ def load_ip_adapter(
                state_dict = pretrained_model_name_or_path_or_dict

            keys = list(state_dict.keys())
-           if keys != ["image_proj", "ip_adapter"]:
+           if "image_proj" not in keys and "ip_adapter" not in keys:
                raise ValueError("Required keys are (`image_proj` and `ip_adapter`) missing from the state dict.")

            state_dicts.append(state_dict)
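
Note: the old exact-list comparison rejected any state dict whose keys were reordered or carried extras, while the new membership test only rejects a dict missing both required keys. A minimal sketch with a toy state dict (not from the commit):

    state_dict = {"image_proj": {}, "ip_adapter": {}, "extra_key": {}}
    keys = list(state_dict.keys())

    old_rejects = keys != ["image_proj", "ip_adapter"]                   # True: the extra key trips the exact match
    new_rejects = "image_proj" not in keys and "ip_adapter" not in keys  # False: both required keys are present
    print(old_rejects, new_rejects)  # True False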

src/diffusers/models/attention_processor.py (3 additions, 1 deletion)

@@ -1908,7 +1908,9 @@ def __call__(
            query = apply_rotary_emb(query, image_rotary_emb)
            key = apply_rotary_emb(key, image_rotary_emb)

-       hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False)
+       hidden_states = F.scaled_dot_product_attention(
+           query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
+       )
        hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
        hidden_states = hidden_states.to(query.dtype)
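
For reference, a self-contained sketch of what passing `attn_mask` does here, with toy shapes and a boolean mask where `True` means "attend" (the tensor names are illustrative):

    import torch
    import torch.nn.functional as F

    query = torch.randn(1, 2, 4, 8)  # (batch, heads, seq_len, head_dim)
    key = torch.randn(1, 2, 4, 8)
    value = torch.randn(1, 2, 4, 8)

    # Mask out the last key position (e.g. padding); broadcasts over batch and heads.
    attention_mask = torch.tensor([True, True, True, False]).reshape(1, 1, 1, 4)

    hidden_states = F.scaled_dot_product_attention(
        query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
    )
    print(hidden_states.shape)  # torch.Size([1, 2, 4, 8])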

src/diffusers/models/controlnets/controlnet_sd3.py (13 additions, 7 deletions)

@@ -393,13 +393,19 @@ def custom_forward(*inputs):
                    return custom_forward

                ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
-               encoder_hidden_states, hidden_states = torch.utils.checkpoint.checkpoint(
-                   create_custom_forward(block),
-                   hidden_states,
-                   encoder_hidden_states,
-                   temb,
-                   **ckpt_kwargs,
-               )
+               if self.context_embedder is not None:
+                   encoder_hidden_states, hidden_states = torch.utils.checkpoint.checkpoint(
+                       create_custom_forward(block),
+                       hidden_states,
+                       encoder_hidden_states,
+                       temb,
+                       **ckpt_kwargs,
+                   )
+               else:
+                   # The SD3.5 8b controlnet uses a single transformer block, which does not use `encoder_hidden_states`
+                   hidden_states = torch.utils.checkpoint.checkpoint(
+                       create_custom_forward(block), hidden_states, temb, **ckpt_kwargs
+                   )

            else:
                if self.context_embedder is not None:
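
The new `else` branch checkpoints a block that takes no `encoder_hidden_states`. A minimal sketch of that call pattern with a stand-in block (the real blocks and shapes differ):

    import torch

    def block(hidden_states, temb):
        # stand-in for a single transformer block without an encoder_hidden_states input
        return hidden_states + temb

    hidden_states = torch.randn(2, 16, requires_grad=True)
    temb = torch.randn(2, 16)

    ckpt_kwargs = {"use_reentrant": False}  # gated on torch >= 1.11 in the real code
    hidden_states = torch.utils.checkpoint.checkpoint(block, hidden_states, temb, **ckpt_kwargs)
    hidden_states.sum().backward()  # activations are recomputed during the backward pass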

src/diffusers/models/transformers/transformer_sd3.py (1 addition, 3 deletions)

@@ -15,7 +15,6 @@

from typing import Any, Dict, List, Optional, Tuple, Union

-import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

@@ -424,8 +423,7 @@ def custom_forward(*inputs):
            # controlnet residual
            if block_controlnet_hidden_states is not None and block.context_pre_only is False:
                interval_control = len(self.transformer_blocks) / len(block_controlnet_hidden_states)
-               interval_control = int(np.ceil(interval_control))
-               hidden_states = hidden_states + block_controlnet_hidden_states[index_block // interval_control]
+               hidden_states = hidden_states + block_controlnet_hidden_states[int(index_block / interval_control)]

        hidden_states = self.norm_out(hidden_states, temb)
        hidden_states = self.proj_out(hidden_states)
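
Keeping `interval_control` as a float changes which controlnet residual each transformer block indexes. A worked example with hypothetical sizes (24 transformer blocks, 10 controlnet states):

    num_blocks, num_controls = 24, 10
    interval_control = num_blocks / num_controls  # 2.4

    old_indices = [idx // 3 for idx in range(num_blocks)]  # int(np.ceil(2.4)) == 3
    new_indices = [int(idx / interval_control) for idx in range(num_blocks)]

    print(max(old_indices))  # 7 -> the last two controlnet states were never applied
    print(max(new_indices))  # 9 -> all ten controlnet states are used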

src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py (1 addition, 8 deletions)

@@ -387,7 +387,6 @@ def __call__(
        prompt: Union[str, List[str]] = None,
        negative_prompt: Union[str, List[str]] = None,
        num_inference_steps: int = 50,
-       timesteps: List[int] = None,
        sigmas: List[float] = None,
        guidance_scale: float = 3.5,
        num_images_per_prompt: Optional[int] = 1,

@@ -424,10 +423,6 @@ def __call__(
            sigmas (`List[float]`, *optional*):
                Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
                `num_inference_steps` and `timesteps` must be `None`.
-           timesteps (`List[int]`, *optional*):
-               Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
-               in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
-               passed will be used. Must be in descending order.
            guidance_scale (`float`, *optional*, defaults to 5.0):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen

@@ -522,9 +517,7 @@ def __call__(
        # 4. Prepare timesteps

        # sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps)
-       timesteps, num_inference_steps = retrieve_timesteps(
-           self.scheduler, num_inference_steps, device, timesteps, sigmas
-       )
+       timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, sigmas=sigmas)

        # 5. Prepare latents.
        latent_channels = self.transformer.config.in_channels
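
With `timesteps` removed from this pipeline's signature, a custom schedule is passed via `sigmas` instead; the step count is then derived from the schedule. A hypothetical usage sketch (the linear ramp mirrors the commented-out line above):

    import numpy as np
    import torch
    from diffusers import AuraFlowPipeline

    pipe = AuraFlowPipeline.from_pretrained("fal/AuraFlow", torch_dtype=torch.float16).to("cuda")

    num_steps = 20
    sigmas = np.linspace(1.0, 1 / num_steps, num_steps).tolist()

    # The number of inference steps is derived from len(sigmas).
    image = pipe(prompt="a photo of a cat", sigmas=sigmas).images[0]

The same `timesteps` → `sigmas` swap appears in the SD3 controlnet, SD3 controlnet inpainting, and Lumina pipelines below.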

src/diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py (6 additions, 6 deletions)

@@ -733,7 +733,7 @@ def __call__(
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 28,
-       timesteps: List[int] = None,
+       sigmas: Optional[List[float]] = None,
        guidance_scale: float = 7.0,
        control_guidance_start: Union[float, List[float]] = 0.0,
        control_guidance_end: Union[float, List[float]] = 1.0,

@@ -778,10 +778,10 @@ def __call__(
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
-           timesteps (`List[int]`, *optional*):
-               Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
-               in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
-               passed will be used. Must be in descending order.
+           sigmas (`List[float]`, *optional*):
+               Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
+               their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
+               will be used.
            guidance_scale (`float`, *optional*, defaults to 5.0):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen

@@ -998,7 +998,7 @@ def __call__(
            assert False

        # 4. Prepare timesteps
-       timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
+       timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, sigmas=sigmas)
        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
        self._num_timesteps = len(timesteps)

src/diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py (6 additions, 6 deletions)

@@ -787,7 +787,7 @@ def __call__(
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 28,
-       timesteps: List[int] = None,
+       sigmas: Optional[List[float]] = None,
        guidance_scale: float = 7.0,
        control_guidance_start: Union[float, List[float]] = 0.0,
        control_guidance_end: Union[float, List[float]] = 1.0,

@@ -833,10 +833,10 @@ def __call__(
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
-           timesteps (`List[int]`, *optional*):
-               Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
-               in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
-               passed will be used. Must be in descending order.
+           sigmas (`List[float]`, *optional*):
+               Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
+               their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
+               will be used.
            guidance_scale (`float`, *optional*, defaults to 5.0):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen

@@ -1033,7 +1033,7 @@ def __call__(
        controlnet_pooled_projections = controlnet_pooled_projections or pooled_prompt_embeds

        # 4. Prepare timesteps
-       timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
+       timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, sigmas=sigmas)
        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
        self._num_timesteps = len(timesteps)

src/diffusers/pipelines/flux/pipeline_flux_prior_redux.py (92 additions, 5 deletions)

@@ -142,6 +142,45 @@ def __init__(
            self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77
        )

+   def check_inputs(
+       self,
+       image,
+       prompt,
+       prompt_2,
+       prompt_embeds=None,
+       pooled_prompt_embeds=None,
+       prompt_embeds_scale=1.0,
+       pooled_prompt_embeds_scale=1.0,
+   ):
+       if prompt is not None and prompt_embeds is not None:
+           raise ValueError(
+               f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
+               " only forward one of the two."
+           )
+       elif prompt_2 is not None and prompt_embeds is not None:
+           raise ValueError(
+               f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
+               " only forward one of the two."
+           )
+       elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
+           raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
+       elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
+           raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
+       if prompt is not None and (isinstance(prompt, list) and isinstance(image, list) and len(prompt) != len(image)):
+           raise ValueError(
+               f"number of prompts must be equal to number of images, but {len(prompt)} prompts were provided and {len(image)} images"
+           )
+       if prompt_embeds is not None and pooled_prompt_embeds is None:
+           raise ValueError(
+               "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
+           )
+       if isinstance(prompt_embeds_scale, list) and (
+           isinstance(image, list) and len(prompt_embeds_scale) != len(image)
+       ):
+           raise ValueError(
+               f"number of weights must be equal to number of images, but {len(prompt_embeds_scale)} weights were provided and {len(image)} images"
+           )
+
    def encode_image(self, image, device, num_images_per_prompt):
        dtype = next(self.image_encoder.parameters()).dtype
        image = self.feature_extractor.preprocess(

@@ -334,6 +373,12 @@ def encode_prompt(
    def __call__(
        self,
        image: PipelineImageInput,
+       prompt: Union[str, List[str]] = None,
+       prompt_2: Optional[Union[str, List[str]]] = None,
+       prompt_embeds: Optional[torch.FloatTensor] = None,
+       pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+       prompt_embeds_scale: Optional[Union[float, List[float]]] = 1.0,
+       pooled_prompt_embeds_scale: Optional[Union[float, List[float]]] = 1.0,
        return_dict: bool = True,
    ):
        r"""

@@ -345,6 +390,16 @@ def __call__(
                numpy array and pytorch tensor, the expected value range is between `[0, 1]` If it's a tensor or a list
                or tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or a
                list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)`
+           prompt (`str` or `List[str]`, *optional*):
+               The prompt or prompts to guide the image generation. **experimental feature**: to use this feature,
+               make sure to explicitly load text encoders to the pipeline. Prompts will be ignored if text encoders
+               are not loaded.
+           prompt_2 (`str` or `List[str]`, *optional*):
+               The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`.
+           prompt_embeds (`torch.FloatTensor`, *optional*):
+               Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
+           pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+               Pre-generated pooled text embeddings.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.flux.FluxPriorReduxPipelineOutput`] instead of a plain tuple.

@@ -356,13 +411,31 @@ def __call__(
            returning a tuple, the first element is a list with the generated images.
        """

+       # 1. Check inputs. Raise error if not correct
+       self.check_inputs(
+           image,
+           prompt,
+           prompt_2,
+           prompt_embeds=prompt_embeds,
+           pooled_prompt_embeds=pooled_prompt_embeds,
+           prompt_embeds_scale=prompt_embeds_scale,
+           pooled_prompt_embeds_scale=pooled_prompt_embeds_scale,
+       )
+
        # 2. Define call parameters
        if image is not None and isinstance(image, Image.Image):
            batch_size = 1
        elif image is not None and isinstance(image, list):
            batch_size = len(image)
        else:
            batch_size = image.shape[0]
+       if prompt is not None and isinstance(prompt, str):
+           prompt = batch_size * [prompt]
+       if isinstance(prompt_embeds_scale, float):
+           prompt_embeds_scale = batch_size * [prompt_embeds_scale]
+       if isinstance(pooled_prompt_embeds_scale, float):
+           pooled_prompt_embeds_scale = batch_size * [pooled_prompt_embeds_scale]
+
        device = self._execution_device

        # 3. Prepare image embeddings

@@ -378,24 +451,38 @@ def __call__(
                pooled_prompt_embeds,
                _,
            ) = self.encode_prompt(
-               prompt=[""] * batch_size,
-               prompt_2=None,
-               prompt_embeds=None,
-               pooled_prompt_embeds=None,
+               prompt=prompt,
+               prompt_2=prompt_2,
+               prompt_embeds=prompt_embeds,
+               pooled_prompt_embeds=pooled_prompt_embeds,
                device=device,
                num_images_per_prompt=1,
                max_sequence_length=512,
                lora_scale=None,
            )
        else:
+           if prompt is not None:
+               logger.warning(
+                   "prompt input is ignored when text encoders are not loaded to the pipeline. "
+                   "Make sure to explicitly load the text encoders to enable prompt input. "
+               )
            # max_sequence_length is 512, t5 encoder hidden size is 4096
            prompt_embeds = torch.zeros((batch_size, 512, 4096), device=device, dtype=image_embeds.dtype)
            # pooled_prompt_embeds is 768, clip text encoder hidden size
            pooled_prompt_embeds = torch.zeros((batch_size, 768), device=device, dtype=image_embeds.dtype)

-       # Concatenate image and text embeddings
+       # scale & concatenate image and text embeddings
        prompt_embeds = torch.cat([prompt_embeds, image_embeds], dim=1)

+       prompt_embeds *= torch.tensor(prompt_embeds_scale, device=device, dtype=image_embeds.dtype)[:, None, None]
+       pooled_prompt_embeds *= torch.tensor(pooled_prompt_embeds_scale, device=device, dtype=image_embeds.dtype)[
+           :, None
+       ]
+
+       # weighted sum
+       prompt_embeds = torch.sum(prompt_embeds, dim=0, keepdim=True)
+       pooled_prompt_embeds = torch.sum(pooled_prompt_embeds, dim=0, keepdim=True)
+
        # Offload all models
        self.maybe_free_model_hooks()
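
A usage sketch of the new prompt input, assuming the base pipeline's text encoders are shared with the Redux pipeline (without them, `prompt` is ignored with the warning above); the image URL is illustrative:

    import torch
    from diffusers import FluxPipeline, FluxPriorReduxPipeline
    from diffusers.utils import load_image

    dtype = torch.bfloat16
    pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=dtype).to("cuda")

    # Reuse the base pipeline's text encoders so `prompt` is actually encoded.
    pipe_prior_redux = FluxPriorReduxPipeline.from_pretrained(
        "black-forest-labs/FLUX.1-Redux-dev",
        text_encoder=pipe.text_encoder,
        tokenizer=pipe.tokenizer,
        text_encoder_2=pipe.text_encoder_2,
        tokenizer_2=pipe.tokenizer_2,
        torch_dtype=dtype,
    ).to("cuda")

    image = load_image("https://example.com/style_reference.png")  # illustrative URL
    prior_output = pipe_prior_redux(image, prompt="a futuristic city at dusk", prompt_embeds_scale=1.0)

    # The output unpacks into prompt_embeds / pooled_prompt_embeds for the base pipeline.
    result = pipe(guidance_scale=2.5, num_inference_steps=50, **prior_output).images[0]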

src/diffusers/pipelines/lumina/pipeline_lumina.py (1 addition, 8 deletions)

@@ -617,7 +617,6 @@ def __call__(
        width: Optional[int] = None,
        height: Optional[int] = None,
        num_inference_steps: int = 30,
-       timesteps: List[int] = None,
        guidance_scale: float = 4.0,
        negative_prompt: Union[str, List[str]] = None,
        sigmas: List[float] = None,

@@ -649,10 +648,6 @@ def __call__(
            num_inference_steps (`int`, *optional*, defaults to 30):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
-           timesteps (`List[int]`, *optional*):
-               Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
-               in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
-               passed will be used. Must be in descending order.
            sigmas (`List[float]`, *optional*):
                Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
                their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed

@@ -776,9 +771,7 @@ def __call__(
            prompt_attention_mask = torch.cat([prompt_attention_mask, negative_prompt_attention_mask], dim=0)

        # 4. Prepare timesteps
-       timesteps, num_inference_steps = retrieve_timesteps(
-           self.scheduler, num_inference_steps, device, timesteps, sigmas
-       )
+       timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, sigmas=sigmas)

        # 5. Prepare latents.
        latent_channels = self.transformer.config.in_channels
