Commit 007eb7f

more
Signed-off-by: YAO Matrix <[email protected]>
1 parent 5ed1810 commit 007eb7f

File tree: 6 files changed, +8 -10 lines changed

  src/diffusers/utils/testing_utils.py
  tests/pipelines/flux/test_pipeline_flux.py
  tests/pipelines/flux/test_pipeline_flux_redux.py
  tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3.py
  tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3_img2img.py
  tests/quantization/torchao/test_torchao.py

src/diffusers/utils/testing_utils.py

Lines changed: 1 addition & 3 deletions

@@ -300,9 +300,7 @@ def require_torch_gpu(test_case):
 
 def require_torch_cuda_compatibility(expected_compute_capability):
     def decorator(test_case):
-        if not torch.cuda.is_available():
-            return unittest.skip(test_case)
-        else:
+        if torch.cuda.is_available():
             current_compute_capability = get_torch_cuda_device_capability()
             return unittest.skipUnless(
                 float(current_compute_capability) == float(expected_compute_capability),
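For reference, a minimal sketch of how the decorator might read after this change. Everything past the float(...) comparison shown in the hunk (the skip reason, the call applying unittest.skipUnless to test_case, the non-CUDA fallthrough, and the final return decorator) is outside this diff and assumed here; the stand-in get_torch_cuda_device_capability only mimics the real helper referenced above.

    import unittest

    import torch


    def get_torch_cuda_device_capability():
        # Stand-in for the helper referenced in the hunk above (assumed behavior):
        # returns the current CUDA device's compute capability, e.g. 8.0 for A100.
        major, minor = torch.cuda.get_device_capability()
        return float(f"{major}.{minor}")


    def require_torch_cuda_compatibility(expected_compute_capability):
        def decorator(test_case):
            if torch.cuda.is_available():
                current_compute_capability = get_torch_cuda_device_capability()
                return unittest.skipUnless(
                    float(current_compute_capability) == float(expected_compute_capability),
                    f"Test requires compute capability {expected_compute_capability}.",  # assumed skip reason
                )(test_case)
            # Before this commit, non-CUDA machines hit `unittest.skip` here; the new
            # version presumably leaves the test case untouched so non-CUDA backends
            # are gated by other decorators instead (assumption).
            return test_case

        return decorator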

tests/pipelines/flux/test_pipeline_flux.py

Lines changed: 2 additions & 2 deletions

@@ -224,7 +224,7 @@ def test_flux_true_cfg(self):
 
 @nightly
 @require_big_accelerator
-@pytest.mark.big_gpu_with_torch_cuda
+@pytest.mark.big_accelerator
 class FluxPipelineSlowTests(unittest.TestCase):
     pipeline_class = FluxPipeline
     repo_id = "black-forest-labs/FLUX.1-schnell"

@@ -312,7 +312,7 @@ def test_flux_inference(self):
 
 @slow
 @require_big_accelerator
-@pytest.mark.big_gpu_with_torch_cuda
+@pytest.mark.big_accelerator
 class FluxIPAdapterPipelineSlowTests(unittest.TestCase):
     pipeline_class = FluxPipeline
     repo_id = "black-forest-labs/FLUX.1-dev"
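The big_gpu_with_torch_cuda to big_accelerator rename (repeated in the remaining pipeline test files below) makes the marker backend-neutral, so the same slow suites can be selected on non-CUDA accelerators such as XPU. As a hedged sketch of how such a custom marker is typically wired up; diffusers may register it in its pytest configuration file instead, so the conftest.py hook below is an assumption:

    # conftest.py (illustrative): register the renamed marker so pytest does not
    # warn about an unknown mark.
    def pytest_configure(config):
        config.addinivalue_line(
            "markers",
            "big_accelerator: tests that need a large-memory accelerator (CUDA, XPU, ...)",
        )

With the marker registered, these suites can be targeted with pytest -m big_accelerator regardless of which accelerator backend is present.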

tests/pipelines/flux/test_pipeline_flux_redux.py

Lines changed: 1 addition & 1 deletion

@@ -19,7 +19,7 @@
 
 @slow
 @require_big_accelerator
-@pytest.mark.big_gpu_with_torch_cuda
+@pytest.mark.big_accelerator
 class FluxReduxSlowTests(unittest.TestCase):
     pipeline_class = FluxPriorReduxPipeline
     repo_id = "black-forest-labs/FLUX.1-Redux-dev"

tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3.py

Lines changed: 1 addition & 1 deletion

@@ -233,7 +233,7 @@ def test_skip_guidance_layers(self):
 
 @slow
 @require_big_accelerator
-@pytest.mark.big_gpu_with_torch_cuda
+@pytest.mark.big_accelerator
 class StableDiffusion3PipelineSlowTests(unittest.TestCase):
     pipeline_class = StableDiffusion3Pipeline
     repo_id = "stabilityai/stable-diffusion-3-medium-diffusers"

tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3_img2img.py

Lines changed: 1 addition & 1 deletion

@@ -168,7 +168,7 @@ def test_multi_vae(self):
 
 @slow
 @require_big_accelerator
-@pytest.mark.big_gpu_with_torch_cuda
+@pytest.mark.big_accelerator
 class StableDiffusion3Img2ImgPipelineSlowTests(unittest.TestCase):
     pipeline_class = StableDiffusion3Img2ImgPipeline
     repo_id = "stabilityai/stable-diffusion-3-medium-diffusers"

tests/quantization/torchao/test_torchao.py

Lines changed: 2 additions & 2 deletions

@@ -598,14 +598,14 @@ def _check_serialization_expected_slice(self, quant_method, quant_method_kwargs,
         )
         self.assertTrue(numpy_cosine_similarity_distance(output_slice, expected_slice) < 1e-3)
 
-    def test_int_a8w8_cuda(self):
+    def test_int_a8w8_accelerator(self):
         quant_method, quant_method_kwargs = "int8_dynamic_activation_int8_weight", {}
         expected_slice = np.array([0.3633, -0.1357, -0.0188, -0.249, -0.4688, 0.5078, -0.1289, -0.6914, 0.4551])
         device = torch_device
         self._test_original_model_expected_slice(quant_method, quant_method_kwargs, expected_slice)
         self._check_serialization_expected_slice(quant_method, quant_method_kwargs, expected_slice, device)
 
-    def test_int_a16w8_cuda(self):
+    def test_int_a16w8_accelerator(self):
         quant_method, quant_method_kwargs = "int8_weight_only", {}
         expected_slice = np.array([0.3613, -0.127, -0.0223, -0.2539, -0.459, 0.4961, -0.1357, -0.6992, 0.4551])
         device = torch_device
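The *_cuda to *_accelerator renames match what the test bodies already do: device = torch_device picks up whatever backend diffusers.utils.testing_utils resolved at import time rather than hard-coding CUDA. A rough sketch of that idea, with the exact resolution order treated as an assumption:

    import torch

    # Simplified stand-in for diffusers.utils.testing_utils.torch_device
    # (the real resolution logic in diffusers may differ):
    if torch.cuda.is_available():
        torch_device = "cuda"
    elif hasattr(torch, "xpu") and torch.xpu.is_available():
        torch_device = "xpu"
    else:
        torch_device = "cpu"

    # A test then stays backend-agnostic, e.g.:
    #     model.to(torch_device)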
