Commit 58b79f2

address feedback
1 parent e10caf4 commit 58b79f2

File tree

4 files changed: 0 additions, 8 deletions

tests/lora/test_lora_layers_sd3.py
tests/pipelines/controlnet_flux/test_controlnet_flux.py
tests/pipelines/flux/test_pipeline_flux.py
tests/pipelines/mochi/test_mochi.py


tests/lora/test_lora_layers_sd3.py

Lines changed: 0 additions & 2 deletions
@@ -36,7 +36,6 @@
     require_big_gpu_with_torch_cuda,
     require_peft_backend,
     require_torch_gpu,
-    slow,
     torch_device,
 )

@@ -134,7 +133,6 @@ def test_modify_padding_mode(self):
         pass


-@slow
 @nightly
 @require_torch_gpu
 @require_peft_backend

tests/pipelines/controlnet_flux/test_controlnet_flux.py

Lines changed: 0 additions & 2 deletions
@@ -35,7 +35,6 @@
     nightly,
     numpy_cosine_similarity_distance,
     require_big_gpu_with_torch_cuda,
-    slow,
     torch_device,
 )
 from diffusers.utils.torch_utils import randn_tensor

@@ -205,7 +204,6 @@ def test_flux_image_output_shape(self):
         assert (output_height, output_width) == (expected_height, expected_width)


-@slow
 @nightly
 @require_big_gpu_with_torch_cuda
 @pytest.mark.big_gpu_with_torch_cuda

tests/pipelines/flux/test_pipeline_flux.py

Lines changed: 0 additions & 2 deletions
@@ -12,7 +12,6 @@
     nightly,
     numpy_cosine_similarity_distance,
     require_big_gpu_with_torch_cuda,
-    slow,
     torch_device,
 )

@@ -207,7 +206,6 @@ def test_flux_image_output_shape(self):
         assert (output_height, output_width) == (expected_height, expected_width)


-@slow
 @nightly
 @require_big_gpu_with_torch_cuda
 @pytest.mark.big_gpu_with_torch_cuda

tests/pipelines/mochi/test_mochi.py

Lines changed: 0 additions & 2 deletions
@@ -28,7 +28,6 @@
     numpy_cosine_similarity_distance,
     require_big_gpu_with_torch_cuda,
     require_torch_gpu,
-    slow,
     torch_device,
 )

@@ -263,7 +262,6 @@ def test_vae_tiling(self, expected_diff_max: float = 0.2):
         )


-@slow
 @nightly
 @require_torch_gpu
 @require_big_gpu_with_torch_cuda

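For context, each hunk drops the slow marker from test classes that already carry nightly, so a single environment gate (plus the GPU requirements) decides whether they run. Below is a minimal sketch of how such markers are commonly implemented; the helper name parse_flag_from_env and the RUN_SLOW / RUN_NIGHTLY variables are assumptions for illustration, and the actual definitions live in diffusers.utils.testing_utils and may differ.

import os
import unittest


def parse_flag_from_env(key: str, default: bool = False) -> bool:
    # Hypothetical helper: treat "1", "true", "yes", "on" (any case) as truthy.
    value = os.environ.get(key, str(default))
    return value.lower() in {"1", "true", "yes", "on"}


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_nightly_tests = parse_flag_from_env("RUN_NIGHTLY", default=False)


def slow(test_case):
    # Skip the decorated test or class unless slow tests are enabled.
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def nightly(test_case):
    # Skip the decorated test or class unless nightly tests are enabled.
    return unittest.skipUnless(_run_nightly_tests, "test is nightly")(test_case)

Under this kind of gating, stacking @slow on a class that is already @nightly only adds a second skip condition, so removing it leaves the nightly flag as the single switch for these integration tests.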