@@ -81,7 +81,7 @@ jobs:
           python utils/print_env.py
       - name: Slow PyTorch CUDA checkpoint tests on Ubuntu
         env:
-          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
           # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
           CUBLAS_WORKSPACE_CONFIG: :16:8
         run: |
@@ -135,7 +135,7 @@ jobs:

      - name: Run PyTorch CUDA tests
        env:
-          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
          CUBLAS_WORKSPACE_CONFIG: :16:8
        run: |
@@ -186,7 +186,7 @@ jobs:

      - name: Run PyTorch CUDA tests
        env:
-          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
          CUBLAS_WORKSPACE_CONFIG: :16:8
        run: |
@@ -241,7 +241,7 @@ jobs:

      - name: Run slow Flax TPU tests
        env:
-          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
        run: |
          python -m pytest -n 0 \
            -s -v -k "Flax" \
@@ -289,7 +289,7 @@ jobs:

      - name: Run slow ONNXRuntime CUDA tests
        env:
-          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
        run: |
          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
            -s -v -k "Onnx" \
@@ -337,7 +337,7 @@ jobs:
          python utils/print_env.py
      - name: Run example tests on GPU
        env:
-          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          RUN_COMPILE: yes
        run: |
          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "compile" --make-reports=tests_torch_compile_cuda tests/
@@ -380,7 +380,7 @@ jobs:
          python utils/print_env.py
      - name: Run example tests on GPU
        env:
-          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
        run: |
          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "xformers" --make-reports=tests_torch_xformers_cuda tests/
      - name: Failure short reports
@@ -426,7 +426,7 @@ jobs:

      - name: Run example tests on GPU
        env:
-          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install timm
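Every hunk above applies the same change: the CI jobs stop reading the shared HF_TOKEN secret and instead expose a dedicated read-only Hub token, DIFFUSERS_HF_HUB_READ_TOKEN, to their test steps as the HF_TOKEN environment variable. A minimal sketch of that step pattern follows; it assumes a repository secret with that name already exists, and the job name, runner label, and pytest selection are placeholders rather than values taken from this workflow.

# Sketch only: shows the secret-as-env-var pattern used in the hunks above.
# Job name, runner label, and test selection are placeholders.
jobs:
  cuda_tests_sketch:
    runs-on: ubuntu-latest          # placeholder runner label
    steps:
      - name: Run PyTorch CUDA tests
        env:
          # Read-only Hub token injected from the repository's secrets
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
          CUBLAS_WORKSPACE_CONFIG: :16:8
        run: |
          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
            -s -v -k "cuda" tests/   # placeholder test selection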