@@ -192,12 +192,12 @@ def test_freeu(self):
         inputs["output_type"] = "np"
         output_no_freeu = pipe(**inputs)[0]

-        assert not np.allclose(output[0, -3:, -3:, -1], output_freeu[0, -3:, -3:, -1]), (
-            "Enabling of FreeU should lead to different results."
-        )
-        assert np.allclose(output, output_no_freeu, atol=1e-2), (
-            f"Disabling of FreeU should lead to results similar to the default pipeline results but Max Abs Error={np.abs(output_no_freeu - output).max()}."
-        )
+        assert not np.allclose(
+            output[0, -3:, -3:, -1], output_freeu[0, -3:, -3:, -1]
+        ), "Enabling of FreeU should lead to different results."
+        assert np.allclose(
+            output, output_no_freeu, atol=1e-2
+        ), f"Disabling of FreeU should lead to results similar to the default pipeline results but Max Abs Error={np.abs(output_no_freeu - output).max()}."

     def test_fused_qkv_projections(self):
         device = "cpu"  # ensure determinism for the device-dependent torch.Generator
@@ -218,12 +218,12 @@ def test_fused_qkv_projections(self):
                 and hasattr(component, "original_attn_processors")
                 and component.original_attn_processors is not None
             ):
-                assert check_qkv_fusion_processors_exist(component), (
-                    "Something wrong with the fused attention processors. Expected all the attention processors to be fused."
-                )
-                assert check_qkv_fusion_matches_attn_procs_length(component, component.original_attn_processors), (
-                    "Something wrong with the attention processors concerning the fused QKV projections."
-                )
+                assert check_qkv_fusion_processors_exist(
+                    component
+                ), "Something wrong with the fused attention processors. Expected all the attention processors to be fused."
+                assert check_qkv_fusion_matches_attn_procs_length(
+                    component, component.original_attn_processors
+                ), "Something wrong with the attention processors concerning the fused QKV projections."

         inputs = self.get_dummy_inputs(device)
         inputs["return_dict"] = False
@@ -236,15 +236,15 @@ def test_fused_qkv_projections(self):
         image_disabled = pipe(**inputs)[0]
         image_slice_disabled = image_disabled[0, -3:, -3:, -1]

-        assert np.allclose(original_image_slice, image_slice_fused, atol=1e-2, rtol=1e-2), (
-            "Fusion of QKV projections shouldn't affect the outputs."
-        )
-        assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-2, rtol=1e-2), (
-            "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled."
-        )
-        assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), (
-            "Original outputs should match when fused QKV projections are disabled."
-        )
+        assert np.allclose(
+            original_image_slice, image_slice_fused, atol=1e-2, rtol=1e-2
+        ), "Fusion of QKV projections shouldn't affect the outputs."
+        assert np.allclose(
+            image_slice_fused, image_slice_disabled, atol=1e-2, rtol=1e-2
+        ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled."
+        assert np.allclose(
+            original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2
+        ), "Original outputs should match when fused QKV projections are disabled."


 class IPAdapterTesterMixin:
@@ -915,9 +915,9 @@ def test_from_pipe_consistent_forward_pass(self, expected_max_diff=1e-3):

         for component in pipe_original.components.values():
             if hasattr(component, "attn_processors"):
-                assert all(type(proc) == AttnProcessor for proc in component.attn_processors.values()), (
-                    "`from_pipe` changed the attention processor in original pipeline."
-                )
+                assert all(
+                    type(proc) == AttnProcessor for proc in component.attn_processors.values()
+                ), "`from_pipe` changed the attention processor in original pipeline."

     @require_accelerator
     @require_accelerate_version_greater("0.14.0")
@@ -2583,12 +2583,12 @@ def test_pyramid_attention_broadcast_inference(self, expected_atol: float = 0.2)
         image_slice_pab_disabled = output.flatten()
         image_slice_pab_disabled = np.concatenate((image_slice_pab_disabled[:8], image_slice_pab_disabled[-8:]))

-        assert np.allclose(original_image_slice, image_slice_pab_enabled, atol=expected_atol), (
-            "PAB outputs should not differ much in specified timestep range."
-        )
-        assert np.allclose(original_image_slice, image_slice_pab_disabled, atol=1e-4), (
-            "Outputs from normal inference and after disabling cache should not differ."
-        )
+        assert np.allclose(
+            original_image_slice, image_slice_pab_enabled, atol=expected_atol
+        ), "PAB outputs should not differ much in specified timestep range."
+        assert np.allclose(
+            original_image_slice, image_slice_pab_disabled, atol=1e-4
+        ), "Outputs from normal inference and after disabling cache should not differ."


 class FasterCacheTesterMixin:
@@ -2653,12 +2653,12 @@ def run_forward(pipe):
         output = run_forward(pipe).flatten()
         image_slice_faster_cache_disabled = np.concatenate((output[:8], output[-8:]))

-        assert np.allclose(original_image_slice, image_slice_faster_cache_enabled, atol=expected_atol), (
-            "FasterCache outputs should not differ much in specified timestep range."
-        )
-        assert np.allclose(original_image_slice, image_slice_faster_cache_disabled, atol=1e-4), (
-            "Outputs from normal inference and after disabling cache should not differ."
-        )
+        assert np.allclose(
+            original_image_slice, image_slice_faster_cache_enabled, atol=expected_atol
+        ), "FasterCache outputs should not differ much in specified timestep range."
+        assert np.allclose(
+            original_image_slice, image_slice_faster_cache_disabled, atol=1e-4
+        ), "Outputs from normal inference and after disabling cache should not differ."

     def test_faster_cache_state(self):
         from diffusers.hooks.faster_cache import _FASTER_CACHE_BLOCK_HOOK, _FASTER_CACHE_DENOISER_HOOK