name: Fast GPU Tests on PR

on:
  pull_request:
    branches: main
    paths:
      - "src/diffusers/models/modeling_utils.py"
      - "src/diffusers/models/model_loading_utils.py"
      - "src/diffusers/pipelines/pipeline_utils.py"
      - "src/diffusers/pipeline_loading_utils.py"
      - "src/diffusers/loaders/lora_base.py"
      - "src/diffusers/loaders/lora_pipeline.py"
      - "src/diffusers/loaders/peft.py"
      - "tests/pipelines/test_pipelines_common.py"
      - "tests/models/test_modeling_common.py"
  workflow_dispatch:

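# Cancel any in-progress run for the same PR when a new commit is pushed.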
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

env:
  DIFFUSERS_IS_CI: yes
  OMP_NUM_THREADS: 8
  MKL_NUM_THREADS: 8
  HF_HUB_ENABLE_HF_TRANSFER: 1
  PYTEST_TIMEOUT: 600
  PIPELINE_USAGE_CUTOFF: 1000000000 # set high cutoff so that only always-test pipelines run

jobs:
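  # Compute the pipeline test matrix on a CPU-only runner so the GPU runners are reserved for the tests themselves.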
  setup_torch_cuda_pipeline_matrix:
    name: Setup Torch Pipelines CUDA Slow Tests Matrix
    runs-on:
      group: aws-general-8-plus
    container:
      image: diffusers/diffusers-pytorch-cpu
    outputs:
      pipeline_test_matrix: ${{ steps.fetch_pipeline_matrix.outputs.pipeline_test_matrix }}
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2
      - name: Install dependencies
        run: |
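          # Each run step starts a fresh shell, so the venv is created and put on PATH per step.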
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e [quality,test]
      - name: Environment
        run: |
          python utils/print_env.py
      - name: Fetch Pipeline Matrix
        id: fetch_pipeline_matrix
        run: |
          matrix=$(python utils/fetch_torch_cuda_pipeline_test_matrix.py)
          echo $matrix
          echo "pipeline_test_matrix=$matrix" >> $GITHUB_OUTPUT
      - name: Pipeline Tests Artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: test-pipelines.json
          path: reports

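  # Fan out one GPU runner per pipeline module from the matrix computed above.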
  torch_pipelines_cuda_tests:
    name: Torch Pipelines CUDA Tests
    needs: setup_torch_cuda_pipeline_matrix
    strategy:
      fail-fast: false
      max-parallel: 8
      matrix:
        module: ${{ fromJson(needs.setup_torch_cuda_pipeline_matrix.outputs.pipeline_test_matrix) }}
    runs-on:
      group: aws-g4dn-2xlarge
    container:
      image: diffusers/diffusers-pytorch-cuda
      options: --shm-size "16gb" --ipc host --gpus 0
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: NVIDIA-SMI
        run: |
          nvidia-smi
      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e [quality,test]
          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
          pip uninstall transformers -y && python -m uv pip install -U transformers@git+https://github.com/huggingface/transformers.git --no-deps

      - name: Environment
        run: |
          python utils/print_env.py
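      # Build a pytest -k expression from the shared pipeline test mixin so PRs only run the common tests.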
      - name: Extract tests
        id: extract_tests
        run: |
          pattern=$(python utils/extract_tests_from_mixin.py --type pipeline)
          echo "$pattern" > /tmp/test_pattern.txt
          echo "pattern_file=/tmp/test_pattern.txt" >> $GITHUB_OUTPUT

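      # ip_adapters runs its full test module; all other modules are filtered with the extracted pattern.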
      - name: PyTorch CUDA checkpoint tests on Ubuntu
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
          CUBLAS_WORKSPACE_CONFIG: :16:8
        run: |
          if [ "${{ matrix.module }}" = "ip_adapters" ]; then
              python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
              -s -v -k "not Flax and not Onnx" \
              --make-reports=tests_pipeline_${{ matrix.module }}_cuda \
              tests/pipelines/${{ matrix.module }}
          else
              pattern=$(cat ${{ steps.extract_tests.outputs.pattern_file }})
              python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
              -s -v -k "not Flax and not Onnx and $pattern" \
              --make-reports=tests_pipeline_${{ matrix.module }}_cuda \
              tests/pipelines/${{ matrix.module }}
          fi

      - name: Failure short reports
        if: ${{ failure() }}
        run: |
          cat reports/tests_pipeline_${{ matrix.module }}_cuda_stats.txt
          cat reports/tests_pipeline_${{ matrix.module }}_cuda_failures_short.txt
      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: pipeline_${{ matrix.module }}_test_reports
          path: reports

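  # Non-pipeline suites (models, schedulers, LoRA, others) run on one GPU runner per module.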
  torch_cuda_tests:
    name: Torch CUDA Tests
    runs-on:
      group: aws-g4dn-2xlarge
    container:
      image: diffusers/diffusers-pytorch-cuda
      options: --shm-size "16gb" --ipc host --gpus 0
    defaults:
      run:
        shell: bash
    strategy:
      fail-fast: false
      max-parallel: 2
      matrix:
        module: [models, schedulers, lora, others]
    steps:
    - name: Checkout diffusers
      uses: actions/checkout@v3
      with:
        fetch-depth: 2

    - name: Install dependencies
      run: |
        python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
        python -m uv pip install -e [quality,test]
        python -m uv pip install peft@git+https://github.com/huggingface/peft.git
        pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
        pip uninstall transformers -y && python -m uv pip install -U transformers@git+https://github.com/huggingface/transformers.git --no-deps

    - name: Environment
      run: |
        python utils/print_env.py

    - name: Extract tests
      id: extract_tests
      run: |
        pattern=$(python utils/extract_tests_from_mixin.py --type ${{ matrix.module }})
        echo "$pattern" > /tmp/test_pattern.txt
        echo "pattern_file=/tmp/test_pattern.txt" >> $GITHUB_OUTPUT

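    # If no pattern was extracted for the module, fall back to running the whole tests/<module> directory.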
    - name: Run PyTorch CUDA tests
      env:
        HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
        # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
        CUBLAS_WORKSPACE_CONFIG: :16:8
      run: |
        pattern=$(cat ${{ steps.extract_tests.outputs.pattern_file }})
        if [ -z "$pattern" ]; then
          python -m pytest -n 1 -sv --max-worker-restart=0 --dist=loadfile -k "not Flax and not Onnx" tests/${{ matrix.module }} \
          --make-reports=tests_torch_cuda_${{ matrix.module }}
        else
          python -m pytest -n 1 -sv --max-worker-restart=0 --dist=loadfile -k "not Flax and not Onnx and $pattern" tests/${{ matrix.module }} \
          --make-reports=tests_torch_cuda_${{ matrix.module }}
        fi

    - name: Failure short reports
      if: ${{ failure() }}
      run: |
        cat reports/tests_torch_cuda_${{ matrix.module }}_stats.txt
        cat reports/tests_torch_cuda_${{ matrix.module }}_failures_short.txt

    - name: Test suite reports artifacts
      if: ${{ always() }}
      uses: actions/upload-artifact@v4
      with:
        name: torch_cuda_test_reports_${{ matrix.module }}
        path: reports

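  # Run the example/training script tests on a GPU runner.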
  run_examples_tests:
    name: Examples PyTorch CUDA tests on Ubuntu
    runs-on:
      group: aws-g4dn-2xlarge

    container:
      image: diffusers/diffusers-pytorch-cuda
      options: --gpus 0 --shm-size "16gb" --ipc host
    steps:
    - name: Checkout diffusers
      uses: actions/checkout@v3
      with:
        fetch-depth: 2

    - name: NVIDIA-SMI
      run: |
        nvidia-smi
    - name: Install dependencies
      run: |
        python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
        python -m uv pip install -e [quality,test,training]

    - name: Environment
      run: |
        python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
        python utils/print_env.py

    - name: Run example tests on GPU
      env:
        HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
      run: |
        python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
        python -m uv pip install timm
        python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v --make-reports=examples_torch_cuda examples/

    - name: Failure short reports
      if: ${{ failure() }}
      run: |
        cat reports/examples_torch_cuda_stats.txt
        cat reports/examples_torch_cuda_failures_short.txt

    - name: Test suite reports artifacts
      if: ${{ always() }}
      uses: actions/upload-artifact@v4
      with:
        name: examples_test_reports
        path: reports