@@ -430,6 +430,97 @@ def test_correct_lora_configs_with_different_ranks(self):
         self.assertTrue(not np.allclose(original_output, lora_output_diff_alpha, atol=1e-3, rtol=1e-3))
         self.assertTrue(not np.allclose(lora_output_diff_alpha, lora_output_same_rank, atol=1e-3, rtol=1e-3))

+    def test_lora_expanding_shape_with_normal_lora_raises_error(self):
+        # TODO: This test checks that an error is raised when a LoRA expands shapes (like control LoRAs) but
+        # another LoRA with the original shapes is then loaded. This is not supported at the moment and should
+        # raise an error. When we do support it, this test should be removed.
+        # Context: https://github.com/huggingface/diffusers/issues/10180
+        components, _, _ = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)
+        pipe = self.pipeline_class(**components)
+        pipe = pipe.to(torch_device)
+        pipe.set_progress_bar_config(disable=None)
+
+        logger = logging.get_logger("diffusers.loaders.lora_pipeline")
+        logger.setLevel(logging.DEBUG)
+
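+        # nn.Linear stores its weight as (out_features, in_features), which is what we unpack here.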
+        out_features, in_features = pipe.transformer.x_embedder.weight.shape
+        rank = 4
+
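+        # This LoRA's A matrix expects twice the input features, mimicking control LoRAs that widen x_embedder.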
+        shape_expander_lora_A = torch.nn.Linear(2 * in_features, rank, bias=False)
+        shape_expander_lora_B = torch.nn.Linear(rank, out_features, bias=False)
+        lora_state_dict = {
+            "transformer.x_embedder.lora_A.weight": shape_expander_lora_A.weight,
+            "transformer.x_embedder.lora_B.weight": shape_expander_lora_B.weight,
+        }
+        with CaptureLogger(logger) as cap_logger:
+            pipe.load_lora_weights(lora_state_dict, "adapter-1")
+            self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser")
+
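+        # Both the module weight and the transformer config should reflect the expanded input features.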
+        self.assertTrue(pipe.transformer.x_embedder.weight.data.shape[1] == 2 * in_features)
+        self.assertTrue(pipe.transformer.config.in_channels == 2 * in_features)
+        self.assertTrue(cap_logger.out.startswith("Expanding the nn.Linear input/output features for module"))
+
+        normal_lora_A = torch.nn.Linear(in_features, rank, bias=False)
+        normal_lora_B = torch.nn.Linear(rank, out_features, bias=False)
+        lora_state_dict = {
+            "transformer.x_embedder.lora_A.weight": normal_lora_A.weight,
+            "transformer.x_embedder.lora_B.weight": normal_lora_B.weight,
+        }
+
+        # The first LoRA expanded the input features of x_embedder. Here, we try to load a LoRA with the original
+        # input features (before expansion). This should raise an error about the weight shapes being incompatible.
+        self.assertRaisesRegex(
+            RuntimeError,
+            "size mismatch for x_embedder.lora_A.adapter-2.weight",
+            pipe.load_lora_weights,
+            lora_state_dict,
+            "adapter-2",
+        )
+
+        # Test the opposite case, where the first LoRA has the original input features and the second LoRA has
+        # expanded input features. This should raise a runtime error about incompatible input shapes, but it
+        # doesn't, because PEFT renames the original layers to `base_layer` and names the LoRA layers after their
+        # adapters. This breaks our logic for checking whether a LoRA weight is compatible with the current model,
+        # and should be addressed when attempting support for
+        # https://github.com/huggingface/diffusers/issues/10180 (TODO)
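+        # For example, after PEFT injection the relevant parameters are named roughly
+        # `transformer.x_embedder.base_layer.weight` and `transformer.x_embedder.lora_A.adapter-1.weight`,
+        # so the original weight no longer lives at `transformer.x_embedder.weight`.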
+        components, _, _ = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)
+        pipe = self.pipeline_class(**components)
+        pipe = pipe.to(torch_device)
+        pipe.set_progress_bar_config(disable=None)
+
+        logger = logging.get_logger("diffusers.loaders.lora_pipeline")
+        logger.setLevel(logging.DEBUG)
+
+        out_features, in_features = pipe.transformer.x_embedder.weight.shape
+        rank = 4
+
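+        # Reuse the normal LoRA from above; on this fresh pipeline it matches the original x_embedder shapes.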
+        lora_state_dict = {
+            "transformer.x_embedder.lora_A.weight": normal_lora_A.weight,
+            "transformer.x_embedder.lora_B.weight": normal_lora_B.weight,
+        }
+
+        with CaptureLogger(logger) as cap_logger:
+            pipe.load_lora_weights(lora_state_dict, "adapter-1")
+            self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser")
+
+        self.assertTrue(pipe.transformer.x_embedder.weight.data.shape[1] == in_features)
+        self.assertTrue(pipe.transformer.config.in_channels == in_features)
+        self.assertFalse(cap_logger.out.startswith("Expanding the nn.Linear input/output features for module"))
+
+        lora_state_dict = {
+            "transformer.x_embedder.lora_A.weight": shape_expander_lora_A.weight,
+            "transformer.x_embedder.lora_B.weight": shape_expander_lora_B.weight,
+        }
+
+        # We should be checking for incompatible input shapes here. But because the issue mentioned above is not
+        # a supported use case, and because of the PEFT renaming, we currently get a shape mismatch error instead.
+        self.assertRaisesRegex(
+            RuntimeError,
+            "size mismatch for x_embedder.lora_A.adapter-2.weight",
+            pipe.load_lora_weights,
+            lora_state_dict,
+            "adapter-2",
+        )
+
     @unittest.skip("Not supported in Flux.")
     def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
         pass