@@ -435,6 +435,14 @@ def test_lora_expanding_shape_with_normal_lora_raises_error(self):
         # another lora with correct shapes is loaded. This is not supported at the moment and should raise an error.
         # When we do support it, this test should be removed. Context: https://github.com/huggingface/diffusers/issues/10180
         components, _, _ = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)
+
+        # Change the transformer config to mimic a real use case.
+        num_channels_without_control = 4
+        transformer = FluxTransformer2DModel.from_config(
+            components["transformer"].config, in_channels=num_channels_without_control
+        ).to(torch_device)
+        components["transformer"] = transformer
+
         pipe = self.pipeline_class(**components)
         pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
@@ -453,12 +461,16 @@ def test_lora_expanding_shape_with_normal_lora_raises_error(self):
         }
         with CaptureLogger(logger) as cap_logger:
             pipe.load_lora_weights(lora_state_dict, "adapter-1")
-        self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser")
 
+        self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser")
+        self.assertTrue(pipe.get_active_adapters() == ["adapter-1"])
         self.assertTrue(pipe.transformer.x_embedder.weight.data.shape[1] == 2 * in_features)
         self.assertTrue(pipe.transformer.config.in_channels == 2 * in_features)
         self.assertTrue(cap_logger.out.startswith("Expanding the nn.Linear input/output features for module"))
 
+        _, _, inputs = self.get_dummy_inputs(with_generator=False)
+        lora_output = pipe(**inputs, generator=torch.manual_seed(0))[0]
+
         normal_lora_A = torch.nn.Linear(in_features, rank, bias=False)
         normal_lora_B = torch.nn.Linear(rank, out_features, bias=False)
         lora_state_dict = {
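The assertions in the hunk above verify that loading the shape-expanded LoRA widens `x_embedder` from `in_features` to `2 * in_features`, matching the "Expanding the nn.Linear input/output features for module" log line. The following is a minimal sketch of that kind of in-features expansion in plain torch; `expand_linear_in_features` is an illustrative helper, not the actual diffusers internal, and zero-filling the new columns is an assumption about the initialization.

import torch
import torch.nn as nn

def expand_linear_in_features(layer: nn.Linear, new_in_features: int) -> nn.Linear:
    # Build a wider replacement and copy the old weights into its leading
    # slice; the extra columns are zero-filled here (an assumed choice).
    assert new_in_features >= layer.in_features
    expanded = nn.Linear(new_in_features, layer.out_features, bias=layer.bias is not None)
    with torch.no_grad():
        expanded.weight.zero_()
        expanded.weight[:, : layer.in_features].copy_(layer.weight)
        if layer.bias is not None:
            expanded.bias.copy_(layer.bias)
    return expanded

x_embedder = nn.Linear(4, 8)
x_embedder = expand_linear_in_features(x_embedder, 2 * x_embedder.in_features)
print(x_embedder.weight.data.shape[1])  # 8, i.e. 2 * in_features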
@@ -475,13 +487,26 @@ def test_lora_expanding_shape_with_normal_lora_raises_error(self):
             lora_state_dict,
             "adapter-2",
         )
+        # We should have `adapter-1` as the only adapter.
+        self.assertTrue(pipe.get_active_adapters() == ["adapter-1"])
+
+        # Check if the output is the same after lora loading error
+        lora_output_after_error = pipe(**inputs, generator=torch.manual_seed(0))[0]
+        self.assertTrue(np.allclose(lora_output, lora_output_after_error, atol=1e-3, rtol=1e-3))
 
         # Test the opposite case where the first lora has the correct input features and the second lora has expanded input features.
         # This should raise a runtime error on input shapes being incompatible. But it doesn't. This is because PEFT renames the
         # original layers as `base_layer` and the lora layers with the adapter names. This makes our logic to check if a lora
         # weight is compatible with the current model inadequate. This should be addressed when attempting support for
         # https://github.com/huggingface/diffusers/issues/10180 (TODO)
         components, _, _ = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)
+        # Change the transformer config to mimic a real use case.
+        num_channels_without_control = 4
+        transformer = FluxTransformer2DModel.from_config(
+            components["transformer"].config, in_channels=num_channels_without_control
+        ).to(torch_device)
+        components["transformer"] = transformer
+
         pipe = self.pipeline_class(**components)
         pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
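The TODO comment in the hunk above hinges on PEFT renaming the wrapped original layer to `base_layer`, which defeats a module-name-based shape check. A rough, self-contained sketch of that wrapping in plain torch (mimicking the structure PEFT produces; `LoraLinearSketch` is an illustrative stand-in, not actual PEFT code):

import torch.nn as nn

class LoraLinearSketch(nn.Module):
    # Mimics the module layout PEFT creates around a wrapped Linear layer.
    def __init__(self, base_layer: nn.Linear, rank: int = 4):
        super().__init__()
        self.base_layer = base_layer  # the original layer, now one level deeper
        self.lora_A = nn.Linear(base_layer.in_features, rank, bias=False)
        self.lora_B = nn.Linear(rank, base_layer.out_features, bias=False)

    def forward(self, x):
        return self.base_layer(x) + self.lora_B(self.lora_A(x))

model = nn.Module()
model.x_embedder = LoraLinearSketch(nn.Linear(8, 16))

# The original weight now lives at `x_embedder.base_layer.weight`, so logic
# that looks up `x_embedder.weight` to compare shapes no longer sees it.
print([name for name, _ in model.named_parameters()])
# ['x_embedder.base_layer.weight', 'x_embedder.base_layer.bias',
#  'x_embedder.lora_A.weight', 'x_embedder.lora_B.weight']

Because the weight key moves under `base_layer`, a second LoRA whose shapes should be flagged as incompatible can slip past the check, which is the inadequacy the comment's TODO refers to.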