81 | 81 |         python utils/print_env.py |
82 | 82 |     - name: Slow PyTorch CUDA checkpoint tests on Ubuntu |
83 | 83 |       env: |
84 | | -         HF_TOKEN: ${{ secrets.HF_TOKEN }} |
| 84 | +         HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }} |
85 | 85 |         # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms |
86 | 86 |         CUBLAS_WORKSPACE_CONFIG: :16:8 |
87 | 87 |       run: | |
@@ -135,7 +135,7 @@ jobs: |
135 | 135 | |
136 | 136 |     - name: Run PyTorch CUDA tests |
137 | 137 |       env: |
138 | | -         HF_TOKEN: ${{ secrets.HF_TOKEN }} |
| 138 | +         HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }} |
139 | 139 |         # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms |
140 | 140 |         CUBLAS_WORKSPACE_CONFIG: :16:8 |
141 | 141 |       run: | |
@@ -165,6 +165,63 @@ jobs: |
165 | 165 |         name: torch_cuda_${{ matrix.module }}_test_reports |
166 | 166 |         path: reports |
167 | 167 | |
| 168 | +   torch_minimum_version_cuda_tests: |
| 169 | +     name: Torch Minimum Version CUDA Tests |
| 170 | +     runs-on: |
| 171 | +       group: aws-g4dn-2xlarge |
| 172 | +     container: |
| 173 | +       image: diffusers/diffusers-pytorch-minimum-cuda |
| 174 | +       options: --shm-size "16gb" --ipc host --gpus 0 |
| 175 | +     defaults: |
| 176 | +       run: |
| 177 | +         shell: bash |
| 178 | +     steps: |
| 179 | +     - name: Checkout diffusers |
| 180 | +       uses: actions/checkout@v3 |
| 181 | +       with: |
| 182 | +         fetch-depth: 2 |
| 183 | + |
| 184 | +     - name: Install dependencies |
| 185 | +       run: | |
| 186 | +         python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH" |
| 187 | +         python -m uv pip install -e [quality,test] |
| 188 | +         python -m uv pip install peft@git+https://github.com/huggingface/peft.git |
| 189 | +         pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git |
| 190 | + |
| 191 | +     - name: Environment |
| 192 | +       run: | |
| 193 | +         python utils/print_env.py |
| 194 | + |
| 195 | +     - name: Run PyTorch CUDA tests |
| 196 | +       env: |
| 197 | +         HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }} |
| 198 | +         # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms |
| 199 | +         CUBLAS_WORKSPACE_CONFIG: :16:8 |
| 200 | +       run: | |
| 201 | +         python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \ |
| 202 | +           -s -v -k "not Flax and not Onnx" \ |
| 203 | +           --make-reports=tests_torch_minimum_version_cuda \ |
| 204 | +           tests/models/test_modeling_common.py \ |
| 205 | +           tests/pipelines/test_pipelines_common.py \ |
| 206 | +           tests/pipelines/test_pipeline_utils.py \ |
| 207 | +           tests/pipelines/test_pipelines.py \ |
| 208 | +           tests/pipelines/test_pipelines_auto.py \ |
| 209 | +           tests/schedulers/test_schedulers.py \ |
| 210 | +           tests/others |
| 211 | + |
| 212 | +     - name: Failure short reports |
| 213 | +       if: ${{ failure() }} |
| 214 | +       run: | |
| 215 | +         cat reports/tests_torch_minimum_version_cuda_stats.txt |
| 216 | +         cat reports/tests_torch_minimum_version_cuda_failures_short.txt |
| 217 | + |
| 218 | +     - name: Test suite reports artifacts |
| 219 | +       if: ${{ always() }} |
| 220 | +       uses: actions/upload-artifact@v4 |
| 221 | +       with: |
| 222 | +         name: torch_minimum_version_cuda_test_reports |
| 223 | +         path: reports |
| 224 | + |
168 | 225 |   flax_tpu_tests: |
169 | 226 |     name: Flax TPU Tests |
170 | 227 |     runs-on: docker-tpu |
@@ -192,7 +249,7 @@ jobs: |
192 | 249 | |
193 | 250 |     - name: Run slow Flax TPU tests |
194 | 251 |       env: |
195 | | -         HF_TOKEN: ${{ secrets.HF_TOKEN }} |
| 252 | +         HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }} |
196 | 253 |       run: | |
197 | 254 |         python -m pytest -n 0 \ |
198 | 255 |           -s -v -k "Flax" \ |
@@ -240,7 +297,7 @@ jobs: |
240 | 297 | |
241 | 298 |     - name: Run slow ONNXRuntime CUDA tests |
242 | 299 |       env: |
243 | | -         HF_TOKEN: ${{ secrets.HF_TOKEN }} |
| 300 | +         HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }} |
244 | 301 |       run: | |
245 | 302 |         python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \ |
246 | 303 |           -s -v -k "Onnx" \ |
@@ -288,7 +345,7 @@ jobs: |
288 | 345 |         python utils/print_env.py |
289 | 346 |     - name: Run example tests on GPU |
290 | 347 |       env: |
291 | | -         HF_TOKEN: ${{ secrets.HF_TOKEN }} |
| 348 | +         HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }} |
292 | 349 |         RUN_COMPILE: yes |
293 | 350 |       run: | |
294 | 351 |         python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "compile" --make-reports=tests_torch_compile_cuda tests/ |
@@ -331,7 +388,7 @@ jobs: |
331 | 388 |         python utils/print_env.py |
332 | 389 |     - name: Run example tests on GPU |
333 | 390 |       env: |
334 | | -         HF_TOKEN: ${{ secrets.HF_TOKEN }} |
| 391 | +         HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }} |
335 | 392 |       run: | |
336 | 393 |         python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "xformers" --make-reports=tests_torch_xformers_cuda tests/ |
337 | 394 |     - name: Failure short reports |
@@ -377,7 +434,7 @@ jobs: |
377 | 434 | |
378 | 435 |     - name: Run example tests on GPU |
379 | 436 |       env: |
380 | | -         HF_TOKEN: ${{ secrets.HF_TOKEN }} |
| 437 | +         HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }} |
381 | 438 |       run: | |
382 | 439 |         python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH" |
383 | 440 |         python -m uv pip install timm |
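For reference, the test step of the new `torch_minimum_version_cuda_tests` job can be approximated locally. This is a minimal sketch, not part of the workflow: it assumes Docker with NVIDIA GPU support, a diffusers checkout mounted at `/diffusers`, and a read-only token exported as `HF_TOKEN`; dependencies would still need to be installed inside the container as in the job's "Install dependencies" step. The image name, environment variables, and pytest flags are taken from the diff above.

```bash
# Sketch: local approximation of the workflow's "Run PyTorch CUDA tests" step
# (mount path, --gpus flag, and token handling are assumptions, not from the workflow).
docker run --rm --gpus all --shm-size 16gb --ipc host \
  -e HF_TOKEN="$HF_TOKEN" \
  -e CUBLAS_WORKSPACE_CONFIG=:16:8 \
  -v "$PWD":/diffusers -w /diffusers \
  diffusers/diffusers-pytorch-minimum-cuda \
  python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
    -s -v -k "not Flax and not Onnx" \
    --make-reports=tests_torch_minimum_version_cuda \
    tests/models/test_modeling_common.py tests/schedulers/test_schedulers.py tests/others
```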