
Commit 190a641

Merge branch 'main' into safety_checker
2 parents: b52e22d + 1d2204d


58 files changed (+607 / -146 lines)


docs/source/en/api/pipelines/controlnet_sd3.md

Lines changed: 1 addition & 0 deletions
@@ -28,6 +28,7 @@ This controlnet code is mainly implemented by [The InstantX Team](https://huggin
 | ControlNet type | Developer | Link |
 | -------- | ---------- | ---- |
 | Canny | [The InstantX Team](https://huggingface.co/InstantX) | [Link](https://huggingface.co/InstantX/SD3-Controlnet-Canny) |
+| Depth | [The InstantX Team](https://huggingface.co/InstantX) | [Link](https://huggingface.co/InstantX/SD3-Controlnet-Depth) |
 | Pose | [The InstantX Team](https://huggingface.co/InstantX) | [Link](https://huggingface.co/InstantX/SD3-Controlnet-Pose) |
 | Tile | [The InstantX Team](https://huggingface.co/InstantX) | [Link](https://huggingface.co/InstantX/SD3-Controlnet-Tile) |
 | Inpainting | [The AlimamaCreative Team](https://huggingface.co/alimama-creative) | [link](https://huggingface.co/alimama-creative/SD3-Controlnet-Inpainting) |
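
For context, a minimal usage sketch of the newly listed Depth checkpoint (not part of this commit). It follows the standard SD3 ControlNet pipeline pattern; the prompt, the depth-map path, and the conditioning scale below are placeholders.

```python
# Hedged sketch: assumes a diffusers build with SD3 ControlNet support and access to the SD3 weights.
import torch
from diffusers import SD3ControlNetModel, StableDiffusion3ControlNetPipeline
from diffusers.utils import load_image

controlnet = SD3ControlNetModel.from_pretrained("InstantX/SD3-Controlnet-Depth", torch_dtype=torch.float16)
pipe = StableDiffusion3ControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers", controlnet=controlnet, torch_dtype=torch.float16
).to("cuda")

control_image = load_image("path/to/depth_map.png")  # placeholder: a precomputed depth map
image = pipe(
    "a photo of a cozy living room",  # placeholder prompt
    control_image=control_image,
    controlnet_conditioning_scale=0.5,
).images[0]
image.save("sd3_depth_controlnet.png")
```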

examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py

Lines changed: 34 additions & 2 deletions
@@ -39,7 +39,7 @@
 from accelerate.utils import DistributedDataParallelKwargs, ProjectConfiguration, set_seed
 from huggingface_hub import create_repo, upload_folder
 from packaging import version
-from peft import LoraConfig
+from peft import LoraConfig, set_peft_model_state_dict
 from peft.utils import get_peft_model_state_dict
 from PIL import Image
 from PIL.ImageOps import exif_transpose

@@ -59,12 +59,13 @@
 )
 from diffusers.loaders import StableDiffusionLoraLoaderMixin
 from diffusers.optimization import get_scheduler
-from diffusers.training_utils import compute_snr
+from diffusers.training_utils import _set_state_dict_into_text_encoder, cast_training_params, compute_snr
 from diffusers.utils import (
     check_min_version,
     convert_all_state_dict_to_peft,
     convert_state_dict_to_diffusers,
     convert_state_dict_to_kohya,
+    convert_unet_state_dict_to_peft,
     is_wandb_available,
 )
 from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card

@@ -1319,6 +1320,37 @@ def load_model_hook(models, input_dir):
             else:
                 raise ValueError(f"unexpected save model: {model.__class__}")

+        lora_state_dict, network_alphas = StableDiffusionPipeline.lora_state_dict(input_dir)
+
+        unet_state_dict = {f'{k.replace("unet.", "")}': v for k, v in lora_state_dict.items() if k.startswith("unet.")}
+        unet_state_dict = convert_unet_state_dict_to_peft(unet_state_dict)
+        incompatible_keys = set_peft_model_state_dict(unet_, unet_state_dict, adapter_name="default")
+        if incompatible_keys is not None:
+            # check only for unexpected keys
+            unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None)
+            if unexpected_keys:
+                logger.warning(
+                    f"Loading adapter weights from state_dict led to unexpected keys not found in the model: "
+                    f" {unexpected_keys}. "
+                )
+
+        if args.train_text_encoder:
+            # Do we need to call `scale_lora_layers()` here?
+            _set_state_dict_into_text_encoder(lora_state_dict, prefix="text_encoder.", text_encoder=text_encoder_one_)
+
+            _set_state_dict_into_text_encoder(
+                lora_state_dict, prefix="text_encoder_2.", text_encoder=text_encoder_one_
+            )
+
+        # Make sure the trainable params are in float32. This is again needed since the base models
+        # are in `weight_dtype`. More details:
+        # https://github.com/huggingface/diffusers/pull/6514#discussion_r1449796804
+        if args.mixed_precision == "fp16":
+            models = [unet_]
+            if args.train_text_encoder:
+                models.extend([text_encoder_one_])
+            # only upcast trainable parameters (LoRA) into fp32
+            cast_training_params(models)
         lora_state_dict, network_alphas = StableDiffusionLoraLoaderMixin.lora_state_dict(input_dir)
         StableDiffusionLoraLoaderMixin.load_lora_into_unet(lora_state_dict, network_alphas=network_alphas, unet=unet_)
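
The fp16 branch added above leans on `cast_training_params` to upcast only the trainable LoRA parameters. A self-contained sketch of that idea (illustrative, not the diffusers implementation): frozen base weights stay in the training dtype while trainable adapter parameters move to float32.

```python
# Sketch of the upcasting idea; the helper name and toy module are made up for illustration.
import torch

def upcast_trainable_params(models, dtype=torch.float32):
    for model in models:
        for param in model.parameters():
            if param.requires_grad:  # only adapter/LoRA params are trainable in these scripts
                param.data = param.data.to(dtype)

layer = torch.nn.Linear(4, 4).half()
layer.weight.requires_grad_(False)  # stands in for a frozen base weight kept in fp16
layer.bias.requires_grad_(True)     # stands in for a trainable LoRA parameter
upcast_trainable_params([layer])
print(layer.weight.dtype, layer.bias.dtype)  # torch.float16 torch.float32
```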

examples/community/README_community_scripts.md

Lines changed: 5 additions & 3 deletions
@@ -6,9 +6,9 @@ If a community script doesn't work as expected, please open an issue and ping th

 | Example | Description | Code Example | Colab | Author |
 |:----------------------------------------|:--------------------------------------------------------------------|:---------------------------------------------------------|:-----------------------------------------------------------------------------------------------|----------------------------------------------------:|
-| Using IP-Adapter with negative noise | Using negative noise with IP-adapter to better control the generation (see the [original post](https://github.com/huggingface/diffusers/discussions/7167) on the forum for more details) | [IP-Adapter Negative Noise](#ip-adapter-negative-noise) | | [Álvaro Somoza](https://github.com/asomoza)|
-| asymmetric tiling |configure seamless image tiling independently for the X and Y axes | [Asymmetric Tiling](#asymmetric-tiling ) | | [alexisrolland](https://github.com/alexisrolland)|
-| Prompt scheduling callback |Allows changing prompts during a generation | [Prompt Scheduling](#prompt-scheduling ) | | [hlky](https://github.com/hlky)|
+| Using IP-Adapter with Negative Noise | Using negative noise with IP-adapter to better control the generation (see the [original post](https://github.com/huggingface/diffusers/discussions/7167) on the forum for more details) | [IP-Adapter Negative Noise](#ip-adapter-negative-noise) | https://github.com/huggingface/notebooks/blob/main/diffusers/ip_adapter_negative_noise.ipynb | [Álvaro Somoza](https://github.com/asomoza)|
+| Asymmetric Tiling |configure seamless image tiling independently for the X and Y axes | [Asymmetric Tiling](#Asymmetric-Tiling ) |https://github.com/huggingface/notebooks/blob/main/diffusers/asymetric_tiling.ipynb | [alexisrolland](https://github.com/alexisrolland)|
+| Prompt Scheduling Callback |Allows changing prompts during a generation | [Prompt Scheduling-Callback](#Prompt-Scheduling-Callback ) |https://github.com/huggingface/notebooks/blob/main/diffusers/prompt_scheduling_callback.ipynb | [hlky](https://github.com/hlky)|


 ## Example usages

@@ -312,4 +312,6 @@ image = pipeline(
     callback_on_step_end=callback,
     callback_on_step_end_tensor_inputs=["prompt_embeds"],
 ).images[0]
+torch.cuda.empty_cache()
+image.save('image.png')
 ```

examples/community/matryoshka.py

Lines changed: 4 additions & 4 deletions
@@ -868,7 +868,7 @@ def forward(
         blocks = list(zip(self.resnets, self.attentions))

         for i, (resnet, attn) in enumerate(blocks):
-            if self.training and self.gradient_checkpointing:
+            if torch.is_grad_enabled() and self.gradient_checkpointing:

                 def create_custom_forward(module, return_dict=None):
                     def custom_forward(*inputs):

@@ -1029,7 +1029,7 @@ def forward(

         hidden_states = self.resnets[0](hidden_states, temb)
         for attn, resnet in zip(self.attentions, self.resnets[1:]):
-            if self.training and self.gradient_checkpointing:
+            if torch.is_grad_enabled() and self.gradient_checkpointing:

                 def create_custom_forward(module, return_dict=None):
                     def custom_forward(*inputs):

@@ -1191,7 +1191,7 @@ def forward(

         hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)

-        if self.training and self.gradient_checkpointing:
+        if torch.is_grad_enabled() and self.gradient_checkpointing:

            def create_custom_forward(module, return_dict=None):
                def custom_forward(*inputs):

@@ -1364,7 +1364,7 @@ def forward(

         # Blocks
         for block in self.transformer_blocks:
-            if self.training and self.gradient_checkpointing:
+            if torch.is_grad_enabled() and self.gradient_checkpointing:

                 def create_custom_forward(module, return_dict=None):
                     def custom_forward(*inputs):
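
The recurring change in this file swaps `self.training` for `torch.is_grad_enabled()` in the gradient-checkpointing guard, so checkpointing follows the autograd state rather than the module's train/eval mode. A minimal sketch of the behaviour (illustrative only, not code from matryoshka.py):

```python
import torch
from torch.utils.checkpoint import checkpoint

def run_block(block, hidden_states, gradient_checkpointing=True):
    # Guard on the autograd state, mirroring the change above.
    if torch.is_grad_enabled() and gradient_checkpointing:
        # Recompute activations during backward to trade compute for memory.
        return checkpoint(block, hidden_states, use_reentrant=False)
    # Under torch.no_grad() (e.g. sampling) the block is called directly.
    return block(hidden_states)

block = torch.nn.Linear(8, 8)
x = torch.randn(2, 8, requires_grad=True)

with torch.no_grad():
    _ = run_block(block, x)  # no graph is being built, so checkpointing is skipped

out = run_block(block, x)    # grads enabled: the block is checkpointed
out.sum().backward()
```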

examples/dreambooth/train_dreambooth_lora_sdxl.py

Lines changed: 22 additions & 14 deletions
@@ -67,6 +67,7 @@
     convert_state_dict_to_diffusers,
     convert_state_dict_to_kohya,
     convert_unet_state_dict_to_peft,
+    is_peft_version,
     is_wandb_available,
 )
 from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card

@@ -1183,26 +1184,33 @@ def main(args):
             text_encoder_one.gradient_checkpointing_enable()
             text_encoder_two.gradient_checkpointing_enable()

+    def get_lora_config(rank, use_dora, target_modules):
+        base_config = {
+            "r": rank,
+            "lora_alpha": rank,
+            "init_lora_weights": "gaussian",
+            "target_modules": target_modules,
+        }
+        if use_dora:
+            if is_peft_version("<", "0.9.0"):
+                raise ValueError(
+                    "You need `peft` 0.9.0 at least to use DoRA-enabled LoRAs. Please upgrade your installation of `peft`."
+                )
+            else:
+                base_config["use_dora"] = True
+
+        return LoraConfig(**base_config)
+
     # now we will add new LoRA weights to the attention layers
-    unet_lora_config = LoraConfig(
-        r=args.rank,
-        use_dora=args.use_dora,
-        lora_alpha=args.rank,
-        init_lora_weights="gaussian",
-        target_modules=["to_k", "to_q", "to_v", "to_out.0"],
-    )
+    unet_target_modules = ["to_k", "to_q", "to_v", "to_out.0"]
+    unet_lora_config = get_lora_config(rank=args.rank, use_dora=args.use_dora, target_modules=unet_target_modules)
     unet.add_adapter(unet_lora_config)

     # The text encoder comes from 🤗 transformers, so we cannot directly modify it.
     # So, instead, we monkey-patch the forward calls of its attention-blocks.
     if args.train_text_encoder:
-        text_lora_config = LoraConfig(
-            r=args.rank,
-            use_dora=args.use_dora,
-            lora_alpha=args.rank,
-            init_lora_weights="gaussian",
-            target_modules=["q_proj", "k_proj", "v_proj", "out_proj"],
-        )
+        text_target_modules = ["q_proj", "k_proj", "v_proj", "out_proj"]
+        text_lora_config = get_lora_config(rank=args.rank, use_dora=args.use_dora, target_modules=text_target_modules)
         text_encoder_one.add_adapter(text_lora_config)
         text_encoder_two.add_adapter(text_lora_config)
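
For readers unfamiliar with how these configs are consumed, a toy sketch of attaching a LoRA adapter with peft (not from the training script, which calls `add_adapter` on the diffusers/transformers models instead); `TinyProj` and its `proj` target module are invented for illustration.

```python
import torch
from peft import LoraConfig, get_peft_model

class TinyProj(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.proj = torch.nn.Linear(16, 16)

    def forward(self, x):
        return self.proj(x)

# Roughly what the refactored get_lora_config() returns for a rank-4, non-DoRA run.
config = LoraConfig(r=4, lora_alpha=4, init_lora_weights="gaussian", target_modules=["proj"])
model = get_peft_model(TinyProj(), config)
model.print_trainable_parameters()  # only the injected LoRA matrices on `proj` are trainable
```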

examples/research_projects/pixart/controlnet_pixart_alpha.py

Lines changed: 1 addition & 1 deletion
@@ -215,7 +215,7 @@ def forward(

         # 2. Blocks
         for block_index, block in enumerate(self.transformer.transformer_blocks):
-            if self.training and self.gradient_checkpointing:
+            if torch.is_grad_enabled() and self.gradient_checkpointing:
                 # rc todo: for training and gradient checkpointing
                 print("Gradient checkpointing is not supported for the controlnet transformer model, yet.")
                 exit(1)
File renamed without changes.
