Skip to content

Commit 6222f4d

Browse files
committed
Mark test_layerwise_upcasting as is_flaky
1 parent ea5a6a8 commit 6222f4d

File tree

1 file changed

+4
-1
lines changed

1 file changed

+4
-1
lines changed

tests/models/test_modeling_common.py

Lines changed: 4 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -61,6 +61,7 @@
     backend_empty_cache,
     floats_tensor,
     get_python_version,
+    is_flaky,
     is_torch_compile,
     numpy_cosine_similarity_distance,
     require_peft_backend,
@@ -1436,6 +1437,7 @@ def test_fn(storage_dtype, compute_dtype):
         test_fn(torch.float8_e5m2, torch.float32)
         test_fn(torch.float8_e4m3fn, torch.bfloat16)

+    @is_flaky
     def test_layerwise_casting_inference(self):
         from diffusers.hooks.layerwise_casting import DEFAULT_SKIP_MODULES_PATTERN, SUPPORTED_PYTORCH_LAYERS

@@ -1473,7 +1475,8 @@ def test_layerwise_casting(storage_dtype, compute_dtype):

             # The precision test is not very important for fast tests. In most cases, the outputs will not be the same.
             # We just want to make sure that the layerwise casting is working as expected.
-            self.assertTrue(numpy_cosine_similarity_distance(base_slice, output) < 1.0)
+            diff = numpy_cosine_similarity_distance(base_slice, output)
+            self.assertTrue(diff < 1.0, f"Expected {diff=} < 1.0.")

         test_layerwise_casting(torch.float16, torch.float32)
         test_layerwise_casting(torch.float8_e4m3fn, torch.float32)

0 commit comments

Comments (0)