Commit bf11691

use xfail
1 parent d175d93 commit bf11691

File tree: 2 files changed, +31 -23 lines changed

tests/models/test_modeling_common.py

Lines changed: 0 additions & 1 deletion
@@ -1366,7 +1366,6 @@ def check_linear_dtype(module, storage_dtype, compute_dtype):
             if any(re.search(pattern, name) for pattern in patterns_to_check):
                 dtype_to_check = compute_dtype
             if getattr(submodule, "weight", None) is not None:
-                print(name, submodule.weight.dtype, dtype_to_check, patterns_to_check)
                 self.assertEqual(submodule.weight.dtype, dtype_to_check)
             if getattr(submodule, "bias", None) is not None:
                 self.assertEqual(submodule.bias.dtype, dtype_to_check)

tests/models/unets/test_models_unet_1d.py

Lines changed: 31 additions & 22 deletions
@@ -15,6 +15,7 @@
 
 import unittest
 
+import pytest
 import torch
 
 from diffusers import UNet1DModel
@@ -152,20 +153,24 @@ def test_unet_1d_maestro(self):
         assert (output_sum - 224.0896).abs() < 0.5
         assert (output_max - 0.0607).abs() < 4e-4
 
-    @unittest.skip(
-        "RuntimeError: 'fill_out' not implemented for 'Float8_e4m3fn'. The error is caused due to certain torch.float8_e4m3fn and torch.float8_e5m2 operations "
-        "not being supported when using deterministic algorithms (which is what the tests run with). To fix:\n"
-        "1. Wait for next PyTorch release: https://github.com/pytorch/pytorch/issues/137160.\n"
-        "2. Unskip this test."
+    @pytest.mark.xfail(
+        reason=(
+            "RuntimeError: 'fill_out' not implemented for 'Float8_e4m3fn'. The error is caused due to certain torch.float8_e4m3fn and torch.float8_e5m2 operations "
+            "not being supported when using deterministic algorithms (which is what the tests run with). To fix:\n"
+            "1. Wait for next PyTorch release: https://github.com/pytorch/pytorch/issues/137160.\n"
+            "2. Unskip this test."
+        ),
     )
     def test_layerwise_casting_inference(self):
-        pass
-
-    @unittest.skip(
-        "RuntimeError: 'fill_out' not implemented for 'Float8_e4m3fn'. The error is caused due to certain torch.float8_e4m3fn and torch.float8_e5m2 operations "
-        "not being supported when using deterministic algorithms (which is what the tests run with). To fix:\n"
-        "1. Wait for next PyTorch release: https://github.com/pytorch/pytorch/issues/137160.\n"
-        "2. Unskip this test."
+        super().test_layerwise_casting_inference()
+
+    @pytest.mark.xfail(
+        reason=(
+            "RuntimeError: 'fill_out' not implemented for 'Float8_e4m3fn'. The error is caused due to certain torch.float8_e4m3fn and torch.float8_e5m2 operations "
+            "not being supported when using deterministic algorithms (which is what the tests run with). To fix:\n"
+            "1. Wait for next PyTorch release: https://github.com/pytorch/pytorch/issues/137160.\n"
+            "2. Unskip this test."
+        ),
     )
     def test_layerwise_casting_memory(self):
         pass
@@ -293,20 +298,24 @@ def test_forward_with_norm_groups(self):
         # Not implemented yet for this UNet
         pass
 
-    @unittest.skip(
-        "RuntimeError: 'fill_out' not implemented for 'Float8_e4m3fn'. The error is caused due to certain torch.float8_e4m3fn and torch.float8_e5m2 operations "
-        "not being supported when using deterministic algorithms (which is what the tests run with). To fix:\n"
-        "1. Wait for next PyTorch release: https://github.com/pytorch/pytorch/issues/137160.\n"
-        "2. Unskip this test."
+    @pytest.mark.xfail(
+        reason=(
+            "RuntimeError: 'fill_out' not implemented for 'Float8_e4m3fn'. The error is caused due to certain torch.float8_e4m3fn and torch.float8_e5m2 operations "
+            "not being supported when using deterministic algorithms (which is what the tests run with). To fix:\n"
+            "1. Wait for next PyTorch release: https://github.com/pytorch/pytorch/issues/137160.\n"
+            "2. Unskip this test."
+        ),
    )
     def test_layerwise_casting_inference(self):
         pass
 
-    @unittest.skip(
-        "RuntimeError: 'fill_out' not implemented for 'Float8_e4m3fn'. The error is caused due to certain torch.float8_e4m3fn and torch.float8_e5m2 operations "
-        "not being supported when using deterministic algorithms (which is what the tests run with). To fix:\n"
-        "1. Wait for next PyTorch release: https://github.com/pytorch/pytorch/issues/137160.\n"
-        "2. Unskip this test."
+    @pytest.mark.xfail(
+        reason=(
+            "RuntimeError: 'fill_out' not implemented for 'Float8_e4m3fn'. The error is caused due to certain torch.float8_e4m3fn and torch.float8_e5m2 operations "
+            "not being supported when using deterministic algorithms (which is what the tests run with). To fix:\n"
+            "1. Wait for next PyTorch release: https://github.com/pytorch/pytorch/issues/137160.\n"
+            "2. Unskip this test."
+        ),
     )
     def test_layerwise_casting_memory(self):
         pass
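Why xfail rather than skip: unittest.skip never executes the test body, so an upstream fix to the Float8_e4m3fn limitation would go unnoticed, whereas pytest.mark.xfail runs the test, reports it as XFAIL while it still fails, and reports an unexpected pass (XPASS) once PyTorch fixes the issue. That is also why the first overridden test now calls super().test_layerwise_casting_inference() instead of pass. Below is a minimal, self-contained sketch of the difference; the test names are hypothetical and not part of this commit.

import pytest


@pytest.mark.skip(reason="body never runs, so an upstream fix goes unnoticed")
def test_skip_never_runs():
    raise RuntimeError("'fill_out' not implemented for 'Float8_e4m3fn'")


@pytest.mark.xfail(
    reason="known limitation; reported as XFAIL while it fails, XPASS once fixed"
)
def test_xfail_still_runs():
    # The body executes on every run, so the expected failure is actually exercised.
    raise RuntimeError("'fill_out' not implemented for 'Float8_e4m3fn'")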
