 jobs:
   setup_torch_cuda_pipeline_matrix:
     name: Setup Torch Pipelines Matrix
-    runs-on: ubuntu-latest
+    runs-on: diffusers/diffusers-pytorch-cpu
     outputs:
       pipeline_test_matrix: ${{ steps.fetch_pipeline_matrix.outputs.pipeline_test_matrix }}
     steps:
@@ -67,20 +67,19 @@ jobs:
           fetch-depth: 2
       - name: NVIDIA-SMI
         run: nvidia-smi
-
+
       - name: Install dependencies
         run: |
-          apt-get update && apt-get install libsndfile1-dev libgl1 -y
           python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
           python -m uv pip install -e [quality,test]
           python -m uv pip install accelerate@git+https://github.com/huggingface/accelerate.git
           python -m uv pip install pytest-reportlog
-
+
       - name: Environment
         run: |
           python utils/print_env.py
-
-      - name: Nightly PyTorch CUDA checkpoint (pipelines) tests
+
+      - name: Nightly PyTorch CUDA checkpoint (pipelines) tests
         env:
           HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
           # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
@@ -89,9 +88,9 @@ jobs:
           python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
             -s -v -k "not Flax and not Onnx" \
             --make-reports=tests_pipeline_${{ matrix.module }}_cuda \
-            --report-log=tests_pipeline_${{ matrix.module }}_cuda.log \
+            --report-log=tests_pipeline_${{ matrix.module }}_cuda.log \
             tests/pipelines/${{ matrix.module }}
-
+
       - name: Failure short reports
         if: ${{ failure() }}
         run: |
@@ -104,7 +103,7 @@ jobs:
         with:
           name: pipeline_${{ matrix.module }}_test_reports
           path: reports
-
+
       - name: Generate Report and Notify Channel
         if: always()
         run: |
@@ -113,7 +112,7 @@ jobs:
 
   run_nightly_tests_for_other_torch_modules:
     name: Torch Non-Pipelines CUDA Nightly Tests
-    runs-on: docker-gpu
+    runs-on: [single-gpu, nvidia-gpu, t4, ci]
     container:
       image: diffusers/diffusers-pytorch-cuda
       options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/ --gpus 0
@@ -131,7 +130,6 @@ jobs:
 
       - name: Install dependencies
         run: |
-          apt-get update && apt-get install libsndfile1-dev libgl1 -y
           python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
           python -m uv pip install -e [quality,test]
           python -m uv pip install accelerate@git+https://github.com/huggingface/accelerate.git
@@ -141,7 +139,7 @@ jobs:
         run: python utils/print_env.py
 
       - name: Run nightly PyTorch CUDA tests for non-pipeline modules
-        if: ${{ matrix.module != 'examples'}}
+        if: ${{ matrix.module != 'examples'}}
         env:
           HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
           # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
@@ -150,7 +148,7 @@ jobs:
           python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
             -s -v -k "not Flax and not Onnx" \
             --make-reports=tests_torch_${{ matrix.module }}_cuda \
-            --report-log=tests_torch_${{ matrix.module }}_cuda.log \
+            --report-log=tests_torch_${{ matrix.module }}_cuda.log \
             tests/${{ matrix.module }}
 
       - name: Run nightly example tests with Torch
@@ -163,13 +161,13 @@ jobs:
           python -m uv pip install peft@git+https://github.com/huggingface/peft.git
           python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
             -s -v --make-reports=examples_torch_cuda \
-            --report-log=examples_torch_cuda.log \
+            --report-log=examples_torch_cuda.log \
             examples/
 
       - name: Failure short reports
         if: ${{ failure() }}
         run: |
-          cat reports/tests_torch_${{ matrix.module }}_cuda_stats.txt
+          cat reports/tests_torch_${{ matrix.module }}_cuda_stats.txt
           cat reports/tests_torch_${{ matrix.module }}_cuda_failures_short.txt
 
       - name: Test suite reports artifacts
@@ -187,7 +185,7 @@ jobs:
 
   run_lora_nightly_tests:
     name: Nightly LoRA Tests with PEFT and TORCH
-    runs-on: docker-gpu
+    runs-on: [single-gpu, nvidia-gpu, t4, ci]
     container:
       image: diffusers/diffusers-pytorch-cuda
       options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/ --gpus 0
@@ -202,7 +200,6 @@ jobs:
 
       - name: Install dependencies
         run: |
-          apt-get update && apt-get install libsndfile1-dev libgl1 -y
           python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
           python -m uv pip install -e [quality,test]
           python -m uv pip install accelerate@git+https://github.com/huggingface/accelerate.git
@@ -221,13 +218,13 @@ jobs:
           python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
             -s -v -k "not Flax and not Onnx" \
             --make-reports=tests_torch_lora_cuda \
-            --report-log=tests_torch_lora_cuda.log \
+            --report-log=tests_torch_lora_cuda.log \
             tests/lora
-
+
       - name: Failure short reports
         if: ${{ failure() }}
         run: |
-          cat reports/tests_torch_lora_cuda_stats.txt
+          cat reports/tests_torch_lora_cuda_stats.txt
           cat reports/tests_torch_lora_cuda_failures_short.txt
 
       - name: Test suite reports artifacts
@@ -242,12 +239,12 @@ jobs:
         run: |
           pip install slack_sdk tabulate
           python scripts/log_reports.py >> $GITHUB_STEP_SUMMARY
-
+
   run_flax_tpu_tests:
     name: Nightly Flax TPU Tests
     runs-on: docker-tpu
     if: github.event_name == 'schedule'
-
+
     container:
       image: diffusers/diffusers-flax-tpu
       options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/ --privileged
@@ -262,7 +259,6 @@ jobs:
 
       - name: Install dependencies
         run: |
-          apt-get update && apt-get install libsndfile1-dev libgl1 -y
           python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
           python -m uv pip install -e [quality,test]
           python -m uv pip install accelerate@git+https://github.com/huggingface/accelerate.git
@@ -278,7 +274,7 @@ jobs:
           python -m pytest -n 0 \
             -s -v -k "Flax" \
             --make-reports=tests_flax_tpu \
-            --report-log=tests_flax_tpu.log \
+            --report-log=tests_flax_tpu.log \
             tests/
 
       - name: Failure short reports
@@ -302,11 +298,11 @@ jobs:
 
   run_nightly_onnx_tests:
     name: Nightly ONNXRuntime CUDA tests on Ubuntu
-    runs-on: docker-gpu
+    runs-on: [single-gpu, nvidia-gpu, t4, ci]
     container:
       image: diffusers/diffusers-onnxruntime-cuda
       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/
-
+
     steps:
       - name: Checkout diffusers
         uses: actions/checkout@v3
@@ -325,15 +321,15 @@ jobs:
 
       - name: Environment
         run: python utils/print_env.py
-
+
       - name: Run nightly ONNXRuntime CUDA tests
         env:
           HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
         run: |
           python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
             -s -v -k "Onnx" \
             --make-reports=tests_onnx_cuda \
-            --report-log=tests_onnx_cuda.log \
+            --report-log=tests_onnx_cuda.log \
             tests/
 
       - name: Failure short reports
@@ -348,7 +344,7 @@ jobs:
         with:
           name: ${{ matrix.config.report }}_test_reports
           path: reports
-
+
       - name: Generate Report and Notify Channel
         if: always()
         run: |