Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/nightly_tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -198,7 +198,7 @@ jobs:
HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
RUN_COMPILE: yes
run: |
pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "compile" --make-reports=tests_torch_compile_cuda tests/
pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -m "is_torch_compile" --make-reports=tests_torch_compile_cuda tests/
- name: Failure short reports
if: ${{ failure() }}
run: cat reports/tests_torch_compile_cuda_failures_short.txt
Expand Down
42 changes: 0 additions & 42 deletions .github/workflows/push_tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -156,48 +156,6 @@ jobs:
name: torch_cuda_test_reports_${{ matrix.module }}
path: reports

run_torch_compile_tests:
name: PyTorch Compile CUDA tests

runs-on:
group: aws-g4dn-2xlarge

container:
image: diffusers/diffusers-pytorch-cuda
options: --gpus all --shm-size "16gb" --ipc host

steps:
- name: Checkout diffusers
uses: actions/checkout@v3
with:
fetch-depth: 2

- name: NVIDIA-SMI
run: |
nvidia-smi
- name: Install dependencies
run: |
uv pip install -e ".[quality,training]"
- name: Environment
run: |
python utils/print_env.py
- name: Run example tests on GPU
env:
HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
RUN_COMPILE: yes
run: |
pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "compile" --make-reports=tests_torch_compile_cuda tests/
- name: Failure short reports
if: ${{ failure() }}
run: cat reports/tests_torch_compile_cuda_failures_short.txt

- name: Test suite reports artifacts
if: ${{ always() }}
uses: actions/upload-artifact@v4
with:
name: torch_compile_test_reports
path: reports

run_xformers_tests:
name: PyTorch xformers CUDA tests

Expand Down
94 changes: 0 additions & 94 deletions .github/workflows/push_tests_fast.yml

This file was deleted.

1 change: 1 addition & 0 deletions tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@

def pytest_configure(config):
    """Register the project's custom pytest markers.

    Declaring markers here lets tests be selected with ``-m`` (e.g.
    ``pytest -m "is_torch_compile"``) without triggering unknown-marker
    warnings.
    """
    custom_markers = (
        "big_accelerator: marks tests as requiring big accelerator resources",
        "is_torch_compile: marks torch compilation tests",
    )
    for marker in custom_markers:
        config.addinivalue_line("markers", marker)


def pytest_addoption(parser):
Expand Down
10 changes: 4 additions & 6 deletions tests/models/test_modeling_common.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,13 +69,12 @@
backend_synchronize,
check_if_dicts_are_equal,
get_python_version,
is_torch_compile,
numpy_cosine_similarity_distance,
require_peft_backend,
require_peft_version_greater,
require_torch_2,
require_torch_accelerator,
require_torch_accelerator_with_training,
require_torch_compile,
require_torch_multi_accelerator,
require_torch_version_greater,
run_test_in_subprocess,
Expand Down Expand Up @@ -783,7 +782,7 @@ def test_from_save_pretrained_variant(self, expected_max_diff=5e-5):
max_diff = (image - new_image).abs().max().item()
self.assertLessEqual(max_diff, expected_max_diff, "Models give different forward passes")

@is_torch_compile
@require_torch_compile
@require_torch_2
@unittest.skipIf(
get_python_version == (3, 12),
Expand Down Expand Up @@ -2038,7 +2037,7 @@ def test_push_to_hub_library_name(self):

@require_torch_accelerator
@require_torch_2
@is_torch_compile
@require_torch_compile
@slow
@require_torch_version_greater("2.7.1")
class TorchCompileTesterMixin:
Expand Down Expand Up @@ -2160,9 +2159,8 @@ def test_compile_works_with_aot(self):
@require_torch_2
@require_torch_accelerator
@require_peft_backend
@require_peft_version_greater("0.14.0")
@require_torch_version_greater("2.7.1")
@is_torch_compile
@require_torch_compile
class LoraHotSwappingForModelTesterMixin:
"""Test that hotswapping does not result in recompilation on the model directly.

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,13 +22,13 @@
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin, TorchCompileTesterMixin
from ..test_modeling_common import ModelTesterMixin


enable_full_determinism()


class SkyReelsV2Transformer3DTests(ModelTesterMixin, TorchCompileTesterMixin, unittest.TestCase):
class SkyReelsV2Transformer3DTests(ModelTesterMixin, unittest.TestCase):
model_class = SkyReelsV2Transformer3DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
Expand Down
19 changes: 19 additions & 0 deletions tests/testing_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -447,6 +447,25 @@ def require_big_accelerator(test_case):
)(test_case)


def require_torch_compile(test_case):
    """
    Decorator for torch compilation tests.

    Attaches the ``is_torch_compile`` pytest marker (so the nightly workflow can
    select these tests with ``-m "is_torch_compile"``) and skips the test unless:

    - PyTorch is installed,
    - a CUDA or XPU accelerator is available, and
    - compile tests are enabled via ``_run_compile_tests`` (presumably driven by
      the ``RUN_COMPILE`` environment variable — confirm in this module's setup).
    """
    import pytest

    # Apply the marker unconditionally, even when the test ends up skipped,
    # so `-m "is_torch_compile"` selection behaves the same in every environment.
    test_case = pytest.mark.is_torch_compile(test_case)

    if not is_torch_available():
        return unittest.skip("test requires PyTorch")(test_case)

    import torch

    if not (torch.cuda.is_available() or torch.xpu.is_available()):
        # The guard accepts either backend, so the skip reason must mention both
        # (the previous message claimed CUDA only, which was misleading on XPU hosts).
        return unittest.skip("test requires a CUDA or XPU accelerator")(test_case)

    return unittest.skipUnless(_run_compile_tests, "test is torch compile")(test_case)


def require_torch_accelerator_with_training(test_case):
"""Decorator marking a test that requires an accelerator with support for training."""
return unittest.skipUnless(
Expand Down
Loading