diff --git a/.github/workflows/push_tests.yml b/.github/workflows/push_tests.yml
index f07e6cda0d59..2289d1b5cad1 100644
--- a/.github/workflows/push_tests.yml
+++ b/.github/workflows/push_tests.yml
@@ -81,7 +81,7 @@ jobs:
       - name: Environment
         run: |
           python utils/print_env.py
-      - name: Slow PyTorch CUDA checkpoint tests on Ubuntu
+      - name: PyTorch CUDA checkpoint tests on Ubuntu
         env:
           HF_TOKEN: ${{ secrets.HF_TOKEN }}
           # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
@@ -184,7 +184,7 @@ jobs:
         run: |
           python utils/print_env.py
 
-      - name: Run slow Flax TPU tests
+      - name: Run Flax TPU tests
         env:
           HF_TOKEN: ${{ secrets.HF_TOKEN }}
         run: |
@@ -232,7 +232,7 @@ jobs:
         run: |
           python utils/print_env.py
 
-      - name: Run slow ONNXRuntime CUDA tests
+      - name: Run ONNXRuntime CUDA tests
         env:
           HF_TOKEN: ${{ secrets.HF_TOKEN }}
         run: |
diff --git a/tests/lora/test_lora_layers_flux.py b/tests/lora/test_lora_layers_flux.py
index 3bc46d1e9b13..b58525cc7a6f 100644
--- a/tests/lora/test_lora_layers_flux.py
+++ b/tests/lora/test_lora_layers_flux.py
@@ -27,6 +27,7 @@
 from diffusers.utils.testing_utils import (
     floats_tensor,
     is_peft_available,
+    nightly,
     numpy_cosine_similarity_distance,
     require_peft_backend,
     require_torch_gpu,
@@ -165,9 +166,10 @@ def test_modify_padding_mode(self):
 
 
 @slow
+@nightly
 @require_torch_gpu
 @require_peft_backend
-# @unittest.skip("We cannot run inference on this model with the current CI hardware")
+@unittest.skip("We cannot run inference on this model with the current CI hardware")
 # TODO (DN6, sayakpaul): move these tests to a beefier GPU
 class FluxLoRAIntegrationTests(unittest.TestCase):
     """internal note: The integration slices were obtained on audace.
diff --git a/tests/lora/test_lora_layers_sd.py b/tests/lora/test_lora_layers_sd.py
index 50187e50a912..e91b0689b4ce 100644
--- a/tests/lora/test_lora_layers_sd.py
+++ b/tests/lora/test_lora_layers_sd.py
@@ -34,6 +34,7 @@
 from diffusers.utils.import_utils import is_accelerate_available
 from diffusers.utils.testing_utils import (
     load_image,
+    nightly,
     numpy_cosine_similarity_distance,
     require_peft_backend,
     require_torch_gpu,
@@ -207,6 +208,7 @@ def test_integration_move_lora_dora_cpu(self):
 
 
 @slow
+@nightly
 @require_torch_gpu
 @require_peft_backend
 class LoraIntegrationTests(unittest.TestCase):
diff --git a/tests/lora/test_lora_layers_sdxl.py b/tests/lora/test_lora_layers_sdxl.py
index 94a44ed8f9ec..30238c74873b 100644
--- a/tests/lora/test_lora_layers_sdxl.py
+++ b/tests/lora/test_lora_layers_sdxl.py
@@ -113,6 +113,7 @@ def tearDown(self):
 
 
 @slow
+@nightly
 @require_torch_gpu
 @require_peft_backend
 class LoraSDXLIntegrationTests(unittest.TestCase):
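
Note: the `slow` and `nightly` markers applied above come from `diffusers.utils.testing_utils`. As a point of reference, below is a minimal sketch of the usual pattern behind such decorators, which gate tests on `RUN_SLOW` / `RUN_NIGHTLY` environment variables; this is an illustrative assumption about how they behave, not the library's exact implementation, and the `parse_flag_from_env` helper here is only a stand-in.

```python
# Illustrative sketch of env-gated test decorators (assumed pattern,
# not the exact diffusers.utils.testing_utils source).
import os
import unittest


def parse_flag_from_env(key: str, default: bool = False) -> bool:
    # Treat "yes"/"true"/"1" (case-insensitive) as enabled.
    value = os.environ.get(key, str(default))
    return value.lower() in ("yes", "true", "t", "1")


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_nightly_tests = parse_flag_from_env("RUN_NIGHTLY", default=False)


def slow(test_case):
    # Skipped unless RUN_SLOW is enabled (e.g. in the push CI workflow).
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def nightly(test_case):
    # Skipped unless RUN_NIGHTLY is enabled (e.g. in the nightly CI workflow).
    return unittest.skipUnless(_run_nightly_tests, "test is nightly")(test_case)
```

Under this pattern, stacking `@slow` and `@nightly` on the same test class means it is collected only when both flags are enabled, so (assuming the nightly workflow sets both `RUN_SLOW` and `RUN_NIGHTLY` while the push workflow sets only `RUN_SLOW`) these LoRA integration tests would effectively run only in the nightly job.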