diff --git a/examples/advanced_diffusion_training/train_dreambooth_lora_flux_advanced.py b/examples/advanced_diffusion_training/train_dreambooth_lora_flux_advanced.py
index f569013a0527..bddab8227ad0 100644
--- a/examples/advanced_diffusion_training/train_dreambooth_lora_flux_advanced.py
+++ b/examples/advanced_diffusion_training/train_dreambooth_lora_flux_advanced.py
@@ -430,6 +430,9 @@ def parse_args(input_args=None):
         default=4,
         help=("The dimension of the LoRA update matrices."),
     )
+
+    parser.add_argument("--lora_dropout", type=float, default=0.0, help="Dropout probability for LoRA layers")
+
     parser.add_argument(
         "--with_prior_preservation",
         default=False,
@@ -1554,6 +1557,7 @@ def main(args):
     transformer_lora_config = LoraConfig(
         r=args.rank,
         lora_alpha=args.rank,
+        lora_dropout=args.lora_dropout,
         init_lora_weights="gaussian",
         target_modules=target_modules,
     )
@@ -1562,6 +1566,7 @@ def main(args):
         text_lora_config = LoraConfig(
             r=args.rank,
             lora_alpha=args.rank,
+            lora_dropout=args.lora_dropout,
             init_lora_weights="gaussian",
             target_modules=["q_proj", "k_proj", "v_proj", "out_proj"],
         )
diff --git a/examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py b/examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py
index 58b1aa0e5618..770f9b92f914 100644
--- a/examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py
+++ b/examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py
@@ -658,6 +658,8 @@ def parse_args(input_args=None):
         default=4,
         help=("The dimension of the LoRA update matrices."),
     )
+    parser.add_argument("--lora_dropout", type=float, default=0.0, help="Dropout probability for LoRA layers")
+
     parser.add_argument(
         "--use_dora",
         action="store_true",
@@ -1248,6 +1250,7 @@ def main(args):
     unet_lora_config = LoraConfig(
         r=args.rank,
         lora_alpha=args.rank,
+        lora_dropout=args.lora_dropout,
         use_dora=args.use_dora,
         init_lora_weights="gaussian",
         target_modules=["to_k", "to_q", "to_v", "to_out.0"],
@@ -1260,6 +1263,7 @@ def main(args):
         text_lora_config = LoraConfig(
             r=args.rank,
             lora_alpha=args.rank,
+            lora_dropout=args.lora_dropout,
             use_dora=args.use_dora,
             init_lora_weights="gaussian",
             target_modules=["q_proj", "k_proj", "v_proj", "out_proj"],
diff --git a/examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py b/examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py
index dae618f43afd..29d454ba8e85 100644
--- a/examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py
+++ b/examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py
@@ -767,6 +767,9 @@ def parse_args(input_args=None):
         default=4,
         help=("The dimension of the LoRA update matrices."),
     )
+
+    parser.add_argument("--lora_dropout", type=float, default=0.0, help="Dropout probability for LoRA layers")
+
     parser.add_argument(
         "--use_dora",
         action="store_true",
@@ -1558,6 +1561,7 @@ def main(args):
         r=args.rank,
         use_dora=args.use_dora,
         lora_alpha=args.rank,
+        lora_dropout=args.lora_dropout,
         init_lora_weights="gaussian",
         target_modules=target_modules,
     )
@@ -1570,6 +1574,7 @@ def main(args):
             r=args.rank,
             use_dora=args.use_dora,
             lora_alpha=args.rank,
+            lora_dropout=args.lora_dropout,
             init_lora_weights="gaussian",
             target_modules=["q_proj", "k_proj", "v_proj", "out_proj"],
         )
diff --git a/examples/dreambooth/train_dreambooth_lora.py b/examples/dreambooth/train_dreambooth_lora.py
index a9552c14cad1..7c008970bd59 100644
--- a/examples/dreambooth/train_dreambooth_lora.py
+++ b/examples/dreambooth/train_dreambooth_lora.py
@@ -524,6 +524,9 @@ def parse_args(input_args=None):
         default=4,
         help=("The dimension of the LoRA update matrices."),
     )
+
+    parser.add_argument("--lora_dropout", type=float, default=0.0, help="Dropout probability for LoRA layers")
+
     parser.add_argument(
         "--image_interpolation_mode",
         type=str,
@@ -932,6 +935,7 @@ def main(args):
     unet_lora_config = LoraConfig(
         r=args.rank,
         lora_alpha=args.rank,
+        lora_dropout=args.lora_dropout,
         init_lora_weights="gaussian",
         target_modules=["to_k", "to_q", "to_v", "to_out.0", "add_k_proj", "add_v_proj"],
     )
@@ -942,6 +946,7 @@ def main(args):
         text_lora_config = LoraConfig(
             r=args.rank,
             lora_alpha=args.rank,
+            lora_dropout=args.lora_dropout,
             init_lora_weights="gaussian",
             target_modules=["q_proj", "k_proj", "v_proj", "out_proj"],
         )
diff --git a/examples/dreambooth/train_dreambooth_lora_flux.py b/examples/dreambooth/train_dreambooth_lora_flux.py
index 5341c321c312..1caf9c62d79b 100644
--- a/examples/dreambooth/train_dreambooth_lora_flux.py
+++ b/examples/dreambooth/train_dreambooth_lora_flux.py
@@ -358,6 +358,9 @@ def parse_args(input_args=None):
         default=4,
         help=("The dimension of the LoRA update matrices."),
     )
+
+    parser.add_argument("--lora_dropout", type=float, default=0.0, help="Dropout probability for LoRA layers")
+
     parser.add_argument(
         "--with_prior_preservation",
         default=False,
@@ -1236,6 +1239,7 @@ def main(args):
     transformer_lora_config = LoraConfig(
         r=args.rank,
         lora_alpha=args.rank,
+        lora_dropout=args.lora_dropout,
         init_lora_weights="gaussian",
         target_modules=target_modules,
     )
@@ -1244,6 +1248,7 @@ def main(args):
         text_lora_config = LoraConfig(
             r=args.rank,
             lora_alpha=args.rank,
+            lora_dropout=args.lora_dropout,
             init_lora_weights="gaussian",
             target_modules=["q_proj", "k_proj", "v_proj", "out_proj"],
         )
diff --git a/examples/dreambooth/train_dreambooth_lora_hidream.py b/examples/dreambooth/train_dreambooth_lora_hidream.py
index aa3ffb2483e1..f368fb809e73 100644
--- a/examples/dreambooth/train_dreambooth_lora_hidream.py
+++ b/examples/dreambooth/train_dreambooth_lora_hidream.py
@@ -417,6 +417,9 @@ def parse_args(input_args=None):
         default=4,
         help=("The dimension of the LoRA update matrices."),
     )
+
+    parser.add_argument("--lora_dropout", type=float, default=0.0, help="Dropout probability for LoRA layers")
+
     parser.add_argument(
         "--with_prior_preservation",
         default=False,
@@ -1161,6 +1164,7 @@ def main(args):
     transformer_lora_config = LoraConfig(
         r=args.rank,
         lora_alpha=args.rank,
+        lora_dropout=args.lora_dropout,
         init_lora_weights="gaussian",
         target_modules=target_modules,
     )
diff --git a/examples/dreambooth/train_dreambooth_lora_lumina2.py b/examples/dreambooth/train_dreambooth_lora_lumina2.py
index 382879ff2e4a..da499bce711d 100644
--- a/examples/dreambooth/train_dreambooth_lora_lumina2.py
+++ b/examples/dreambooth/train_dreambooth_lora_lumina2.py
@@ -328,6 +328,9 @@ def parse_args(input_args=None):
         default=4,
         help=("The dimension of the LoRA update matrices."),
     )
+
+    parser.add_argument("--lora_dropout", type=float, default=0.0, help="Dropout probability for LoRA layers")
+
     parser.add_argument(
         "--with_prior_preservation",
         default=False,
@@ -1023,6 +1026,7 @@ def main(args):
     transformer_lora_config = LoraConfig(
         r=args.rank,
         lora_alpha=args.rank,
+        lora_dropout=args.lora_dropout,
         init_lora_weights="gaussian",
         target_modules=target_modules,
     )
diff --git a/examples/dreambooth/train_dreambooth_lora_sana.py b/examples/dreambooth/train_dreambooth_lora_sana.py
index bef6e045949d..0c4a16d1802f 100644
--- a/examples/dreambooth/train_dreambooth_lora_sana.py
+++ b/examples/dreambooth/train_dreambooth_lora_sana.py
@@ -323,6 +323,9 @@ def parse_args(input_args=None):
         default=4,
         help=("The dimension of the LoRA update matrices."),
     )
+
+    parser.add_argument("--lora_dropout", type=float, default=0.0, help="Dropout probability for LoRA layers")
+
     parser.add_argument(
         "--with_prior_preservation",
         default=False,
@@ -1021,6 +1024,7 @@ def main(args):
     transformer_lora_config = LoraConfig(
         r=args.rank,
         lora_alpha=args.rank,
+        lora_dropout=args.lora_dropout,
         init_lora_weights="gaussian",
         target_modules=target_modules,
     )
diff --git a/examples/dreambooth/train_dreambooth_lora_sd3.py b/examples/dreambooth/train_dreambooth_lora_sd3.py
index b1786260d1f2..d00d2fafe827 100644
--- a/examples/dreambooth/train_dreambooth_lora_sd3.py
+++ b/examples/dreambooth/train_dreambooth_lora_sd3.py
@@ -367,6 +367,9 @@ def parse_args(input_args=None):
         default=4,
         help=("The dimension of the LoRA update matrices."),
     )
+
+    parser.add_argument("--lora_dropout", type=float, default=0.0, help="Dropout probability for LoRA layers")
+
     parser.add_argument(
         "--with_prior_preservation",
         default=False,
@@ -1264,6 +1267,7 @@ def main(args):
     transformer_lora_config = LoraConfig(
         r=args.rank,
         lora_alpha=args.rank,
+        lora_dropout=args.lora_dropout,
         init_lora_weights="gaussian",
         target_modules=target_modules,
     )
@@ -1273,6 +1277,7 @@ def main(args):
         text_lora_config = LoraConfig(
             r=args.rank,
             lora_alpha=args.rank,
+            lora_dropout=args.lora_dropout,
             init_lora_weights="gaussian",
             target_modules=["q_proj", "k_proj", "v_proj", "out_proj"],
         )
diff --git a/examples/dreambooth/train_dreambooth_lora_sdxl.py b/examples/dreambooth/train_dreambooth_lora_sdxl.py
index 8af8020bb3fc..364c0da8d5a6 100644
--- a/examples/dreambooth/train_dreambooth_lora_sdxl.py
+++ b/examples/dreambooth/train_dreambooth_lora_sdxl.py
@@ -659,6 +659,9 @@ def parse_args(input_args=None):
         default=4,
         help=("The dimension of the LoRA update matrices."),
     )
+
+    parser.add_argument("--lora_dropout", type=float, default=0.0, help="Dropout probability for LoRA layers")
+
     parser.add_argument(
         "--use_dora",
         action="store_true",
@@ -1199,10 +1202,11 @@ def main(args):
             text_encoder_one.gradient_checkpointing_enable()
             text_encoder_two.gradient_checkpointing_enable()
 
-    def get_lora_config(rank, use_dora, target_modules):
+    def get_lora_config(rank, dropout, use_dora, target_modules):
         base_config = {
             "r": rank,
             "lora_alpha": rank,
+            "lora_dropout": dropout,
             "init_lora_weights": "gaussian",
             "target_modules": target_modules,
         }
@@ -1218,14 +1222,24 @@ def get_lora_config(rank, use_dora, target_modules):
 
     # now we will add new LoRA weights to the attention layers
     unet_target_modules = ["to_k", "to_q", "to_v", "to_out.0"]
-    unet_lora_config = get_lora_config(rank=args.rank, use_dora=args.use_dora, target_modules=unet_target_modules)
+    unet_lora_config = get_lora_config(
+        rank=args.rank,
+        dropout=args.lora_dropout,
+        use_dora=args.use_dora,
+        target_modules=unet_target_modules,
+    )
     unet.add_adapter(unet_lora_config)
 
     # The text encoder comes from 🤗 transformers, so we cannot directly modify it.
     # So, instead, we monkey-patch the forward calls of its attention-blocks.
     if args.train_text_encoder:
         text_target_modules = ["q_proj", "k_proj", "v_proj", "out_proj"]
-        text_lora_config = get_lora_config(rank=args.rank, use_dora=args.use_dora, target_modules=text_target_modules)
+        text_lora_config = get_lora_config(
+            rank=args.rank,
+            dropout=args.lora_dropout,
+            use_dora=args.use_dora,
+            target_modules=text_target_modules,
+        )
         text_encoder_one.add_adapter(text_lora_config)
         text_encoder_two.add_adapter(text_lora_config)
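
Usage note (not part of the diff): a minimal sketch of how the value passed via the new --lora_dropout flag reaches peft's LoraConfig in these scripts. The dropout value 0.1 below is illustrative only; the scripts default to 0.0, and rank 4 matches the scripts' --rank default.

# Minimal sketch, assuming peft is installed; values other than the defaults are illustrative.
from peft import LoraConfig

rank = 4            # mirrors --rank (default 4)
lora_dropout = 0.1  # mirrors --lora_dropout (scripts default to 0.0); 0.1 is an example value

# Same pattern the training scripts use for the unet/transformer adapter.
unet_lora_config = LoraConfig(
    r=rank,
    lora_alpha=rank,
    lora_dropout=lora_dropout,  # dropout applied inside the LoRA layers during training
    init_lora_weights="gaussian",
    target_modules=["to_k", "to_q", "to_v", "to_out.0"],
)
# The scripts then attach the adapter, e.g. unet.add_adapter(unet_lora_config)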