Commit aaa1d08

fix format issue
Signed-off-by: Liu, Kaixuan <[email protected]>
1 parent e0452aa commit aaa1d08

File tree: 4 files changed, +44 -44 lines changed


tests/pipelines/kandinsky2_2/test_kandinsky_controlnet.py

Lines changed: 6 additions & 6 deletions
@@ -212,13 +212,13 @@ def test_kandinsky_controlnet(self):
             [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
         )
 
-        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, (
-            f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
-        )
+        assert (
+            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
+        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
 
-        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, (
-            f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
-        )
+        assert (
+            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
+        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
 
     def test_float16_inference(self):
         super().test_float16_inference(expected_max_diff=1e-1)
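The change here is purely cosmetic: the parentheses move from the failure message to the condition, so the assertion checks the same tolerance and raises the same AssertionError. A minimal standalone sketch (dummy arrays stand in for the pipeline outputs) showing the new layout in isolation:

import numpy as np

image_slice = np.zeros(9)          # stand-in for the generated image slice
expected_slice = np.full(9, 0.5)   # stand-in for the reference values

# New layout used by this commit: wrapped condition, message on the closing line.
try:
    assert (
        np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
except AssertionError as err:
    # The old layout (message wrapped in parentheses) raises this same message.
    print(err)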

tests/pipelines/ledits_pp/test_ledits_pp_stable_diffusion.py

Lines changed: 1 addition & 1 deletion
@@ -257,7 +257,7 @@ def test_ledits_pp_editing(self):
                     0.8378906,
                     0.94433594,
                     0.91503906,
-                    0.8491211
+                    0.8491211,
                 ]
             ),
             ("cuda", 7): np.array(

tests/pipelines/test_pipelines_common.py

Lines changed: 36 additions & 36 deletions
@@ -192,12 +192,12 @@ def test_freeu(self):
         inputs["output_type"] = "np"
         output_no_freeu = pipe(**inputs)[0]
 
-        assert not np.allclose(output[0, -3:, -3:, -1], output_freeu[0, -3:, -3:, -1]), (
-            "Enabling of FreeU should lead to different results."
-        )
-        assert np.allclose(output, output_no_freeu, atol=1e-2), (
-            f"Disabling of FreeU should lead to results similar to the default pipeline results but Max Abs Error={np.abs(output_no_freeu - output).max()}."
-        )
+        assert not np.allclose(
+            output[0, -3:, -3:, -1], output_freeu[0, -3:, -3:, -1]
+        ), "Enabling of FreeU should lead to different results."
+        assert np.allclose(
+            output, output_no_freeu, atol=1e-2
+        ), f"Disabling of FreeU should lead to results similar to the default pipeline results but Max Abs Error={np.abs(output_no_freeu - output).max()}."
 
     def test_fused_qkv_projections(self):
         device = "cpu"  # ensure determinism for the device-dependent torch.Generator
@@ -218,12 +218,12 @@ def test_fused_qkv_projections(self):
                 and hasattr(component, "original_attn_processors")
                 and component.original_attn_processors is not None
             ):
-                assert check_qkv_fusion_processors_exist(component), (
-                    "Something wrong with the fused attention processors. Expected all the attention processors to be fused."
-                )
-                assert check_qkv_fusion_matches_attn_procs_length(component, component.original_attn_processors), (
-                    "Something wrong with the attention processors concerning the fused QKV projections."
-                )
+                assert check_qkv_fusion_processors_exist(
+                    component
+                ), "Something wrong with the fused attention processors. Expected all the attention processors to be fused."
+                assert check_qkv_fusion_matches_attn_procs_length(
+                    component, component.original_attn_processors
+                ), "Something wrong with the attention processors concerning the fused QKV projections."
 
         inputs = self.get_dummy_inputs(device)
         inputs["return_dict"] = False
@@ -236,15 +236,15 @@ def test_fused_qkv_projections(self):
         image_disabled = pipe(**inputs)[0]
         image_slice_disabled = image_disabled[0, -3:, -3:, -1]
 
-        assert np.allclose(original_image_slice, image_slice_fused, atol=1e-2, rtol=1e-2), (
-            "Fusion of QKV projections shouldn't affect the outputs."
-        )
-        assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-2, rtol=1e-2), (
-            "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled."
-        )
-        assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), (
-            "Original outputs should match when fused QKV projections are disabled."
-        )
+        assert np.allclose(
+            original_image_slice, image_slice_fused, atol=1e-2, rtol=1e-2
+        ), "Fusion of QKV projections shouldn't affect the outputs."
+        assert np.allclose(
+            image_slice_fused, image_slice_disabled, atol=1e-2, rtol=1e-2
+        ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled."
+        assert np.allclose(
+            original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2
+        ), "Original outputs should match when fused QKV projections are disabled."
 
 
 class IPAdapterTesterMixin:
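The fused-QKV test drives the pipeline through three states, and the reformatted asserts require all three output slices to agree within atol=1e-2/rtol=1e-2. A rough sketch of the surrounding flow (pipe and inputs as set up earlier in the test; fuse_qkv_projections/unfuse_qkv_projections are the public diffusers methods):

# Baseline output before any fusion.
original_image_slice = pipe(**inputs)[0][0, -3:, -3:, -1]

pipe.fuse_qkv_projections()      # merge the Q, K, V projections into one matmul
image_slice_fused = pipe(**inputs)[0][0, -3:, -3:, -1]

pipe.unfuse_qkv_projections()    # restore the separate Q, K, V projections
image_slice_disabled = pipe(**inputs)[0][0, -3:, -3:, -1]
# All three slices should agree: fusion is a pure performance optimization.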
@@ -915,9 +915,9 @@ def test_from_pipe_consistent_forward_pass(self, expected_max_diff=1e-3):
 
         for component in pipe_original.components.values():
             if hasattr(component, "attn_processors"):
-                assert all(type(proc) == AttnProcessor for proc in component.attn_processors.values()), (
-                    "`from_pipe` changed the attention processor in original pipeline."
-                )
+                assert all(
+                    type(proc) == AttnProcessor for proc in component.attn_processors.values()
+                ), "`from_pipe` changed the attention processor in original pipeline."
 
     @require_accelerator
     @require_accelerate_version_greater("0.14.0")

@@ -2583,12 +2583,12 @@ def test_pyramid_attention_broadcast_inference(self, expected_atol: float = 0.2)
         image_slice_pab_disabled = output.flatten()
         image_slice_pab_disabled = np.concatenate((image_slice_pab_disabled[:8], image_slice_pab_disabled[-8:]))
 
-        assert np.allclose(original_image_slice, image_slice_pab_enabled, atol=expected_atol), (
-            "PAB outputs should not differ much in specified timestep range."
-        )
-        assert np.allclose(original_image_slice, image_slice_pab_disabled, atol=1e-4), (
-            "Outputs from normal inference and after disabling cache should not differ."
-        )
+        assert np.allclose(
+            original_image_slice, image_slice_pab_enabled, atol=expected_atol
+        ), "PAB outputs should not differ much in specified timestep range."
+        assert np.allclose(
+            original_image_slice, image_slice_pab_disabled, atol=1e-4
+        ), "Outputs from normal inference and after disabling cache should not differ."
 
 
 class FasterCacheTesterMixin:

@@ -2653,12 +2653,12 @@ def run_forward(pipe):
         output = run_forward(pipe).flatten()
         image_slice_faster_cache_disabled = np.concatenate((output[:8], output[-8:]))
 
-        assert np.allclose(original_image_slice, image_slice_faster_cache_enabled, atol=expected_atol), (
-            "FasterCache outputs should not differ much in specified timestep range."
-        )
-        assert np.allclose(original_image_slice, image_slice_faster_cache_disabled, atol=1e-4), (
-            "Outputs from normal inference and after disabling cache should not differ."
-        )
+        assert np.allclose(
+            original_image_slice, image_slice_faster_cache_enabled, atol=expected_atol
+        ), "FasterCache outputs should not differ much in specified timestep range."
+        assert np.allclose(
+            original_image_slice, image_slice_faster_cache_disabled, atol=1e-4
+        ), "Outputs from normal inference and after disabling cache should not differ."
 
     def test_faster_cache_state(self):
         from diffusers.hooks.faster_cache import _FASTER_CACHE_BLOCK_HOOK, _FASTER_CACHE_DENOISER_HOOK
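Every hunk in this file reformats the same np.allclose assertion pattern without changing its semantics. As a reference for the tolerances used above, np.allclose(a, b, rtol, atol) passes when |a - b| <= atol + rtol * |b| holds elementwise:

import numpy as np

a = np.array([0.100, 0.200, 0.300])
b = np.array([0.101, 0.199, 0.305])   # max abs difference is 0.005

print(np.allclose(a, b, atol=1e-2, rtol=1e-2))  # True: 0.005 <= 0.01 + 0.01 * |b|
print(np.allclose(a, b, atol=1e-4, rtol=0.0))   # False: 0.005 > 1e-4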

tests/quantization/gguf/test_gguf.py

Lines changed: 1 addition & 1 deletion
@@ -312,7 +312,7 @@ def test_pipeline_inference(self):
                         0.29492188,
                         0.140625,
                         0.3046875,
-                        0.28515625
+                        0.28515625,
                     ]
                 ),
                 ("cuda", 7): np.array(
