@@ -877,11 +877,15 @@ def test_simple_inference_with_text_denoiser_lora_unfused(
877877            pipe, denoiser = self.check_if_adapters_added_correctly(pipe, text_lora_config, denoiser_lora_config)
878878
879879            pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules)
880-             assert pipe.num_fused_loras == 1, pipe.num_fused_loras
880+             self.assertTrue(
881+                 pipe.num_fused_loras == 1, f"{pipe.num_fused_loras=} {pipe.fused_loras=}"
882+             )
881883            output_fused_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
882884
883885            pipe.unfuse_lora(components=self.pipeline_class._lora_loadable_modules)
884-             assert pipe.num_fused_loras == 0, pipe.num_fused_loras
886+             self.assertTrue(
887+                 pipe.num_fused_loras == 0, f"{pipe.num_fused_loras=} {pipe.fused_loras=}"
888+             )
885889            output_unfused_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
886890
887891            # unloading should remove the LoRA layers 
@@ -1703,7 +1707,7 @@ def test_lora_scale_kwargs_match_fusion(self, expected_atol: float = 1e-3, expec
17031707                    adapter_names=["adapter-1"],
17041708                    lora_scale=lora_scale,
17051709                )
1706-                 assert pipe.num_fused_loras == 1, pipe.num_fused_loras
1710+                 self.assertTrue(pipe.num_fused_loras == 1, f"{pipe.num_fused_loras=}, {pipe.fused_loras=}")
17071711
17081712                outputs_lora_1_fused = pipe(**inputs, generator=torch.manual_seed(0))[0]
17091713