@@ -19,7 +19,7 @@
 jobs:
   setup_torch_cuda_pipeline_matrix:
     name: Setup Torch Pipelines Matrix
-    runs-on: ubuntu-latest
+    runs-on: diffusers/diffusers-pytorch-cpu
     outputs:
       pipeline_test_matrix: ${{ steps.fetch_pipeline_matrix.outputs.pipeline_test_matrix }}
     steps:
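Note on the hunk above: `pipeline_test_matrix` is emitted as a JSON string by this setup job and is what the later `${{ matrix.module }}` references expand from. A minimal sketch of how such an output is typically consumed in a downstream job follows; the job name, the runner label, and the `needs`/`fromJson` wiring are illustrative assumptions, not part of this diff:

```yaml
  # Hypothetical consumer of the matrix produced by setup_torch_cuda_pipeline_matrix.
  run_nightly_pipeline_tests:
    needs: setup_torch_cuda_pipeline_matrix
    runs-on: docker-gpu   # assumed runner label
    strategy:
      fail-fast: false
      matrix:
        # fromJson turns the JSON string output into a real list for the matrix.
        module: ${{ fromJson(needs.setup_torch_cuda_pipeline_matrix.outputs.pipeline_test_matrix) }}
    steps:
      - name: Show selected module
        run: echo "Will test tests/pipelines/${{ matrix.module }}"
```

With `fail-fast: false`, a failure in one pipeline module does not cancel the remaining matrix entries.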
@@ -67,19 +67,19 @@ jobs:
           fetch-depth: 2
       - name: NVIDIA-SMI
         run: nvidia-smi
-
+
       - name: Install dependencies
         run: |
           python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
           python -m uv pip install -e [quality,test]
           python -m uv pip install accelerate@git+https://github.com/huggingface/accelerate.git
           python -m uv pip install pytest-reportlog
-
+
       - name: Environment
         run: |
           python utils/print_env.py
-
-      - name: Nightly PyTorch CUDA checkpoint (pipelines) tests
+
+      - name: Nightly PyTorch CUDA checkpoint (pipelines) tests
         env:
           HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
           # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
           CUBLAS_WORKSPACE_CONFIG: :16:8
         run: |
           python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
             -s -v -k "not Flax and not Onnx" \
             --make-reports=tests_pipeline_${{ matrix.module }}_cuda \
-            --report-log=tests_pipeline_${{ matrix.module }}_cuda.log \
+            --report-log=tests_pipeline_${{ matrix.module }}_cuda.log \
             tests/pipelines/${{ matrix.module }}
-
+
       - name: Failure short reports
         if: ${{ failure() }}
         run: |
@@ -103,7 +103,7 @@ jobs:
         with:
           name: pipeline_${{ matrix.module }}_test_reports
           path: reports
-
+
       - name: Generate Report and Notify Channel
         if: always()
         run: |
@@ -139,7 +139,7 @@ jobs:
         run: python utils/print_env.py

       - name: Run nightly PyTorch CUDA tests for non-pipeline modules
-        if: ${{ matrix.module != 'examples'}}
+        if: ${{ matrix.module != 'examples'}}
         env:
           HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
           # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
@@ -148,7 +148,7 @@ jobs:
           python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
             -s -v -k "not Flax and not Onnx" \
             --make-reports=tests_torch_${{ matrix.module }}_cuda \
-            --report-log=tests_torch_${{ matrix.module }}_cuda.log \
+            --report-log=tests_torch_${{ matrix.module }}_cuda.log \
             tests/${{ matrix.module }}

       - name: Run nightly example tests with Torch
@@ -161,13 +161,13 @@ jobs:
           python -m uv pip install peft@git+https://github.com/huggingface/peft.git
           python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
             -s -v --make-reports=examples_torch_cuda \
-            --report-log=examples_torch_cuda.log \
+            --report-log=examples_torch_cuda.log \
             examples/

       - name: Failure short reports
         if: ${{ failure() }}
         run: |
-          cat reports/tests_torch_${{ matrix.module }}_cuda_stats.txt
+          cat reports/tests_torch_${{ matrix.module }}_cuda_stats.txt
           cat reports/tests_torch_${{ matrix.module }}_cuda_failures_short.txt

       - name: Test suite reports artifacts
@@ -218,13 +218,13 @@ jobs:
           python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
             -s -v -k "not Flax and not Onnx" \
             --make-reports=tests_torch_lora_cuda \
-            --report-log=tests_torch_lora_cuda.log \
+            --report-log=tests_torch_lora_cuda.log \
             tests/lora
-
+
       - name: Failure short reports
         if: ${{ failure() }}
         run: |
-          cat reports/tests_torch_lora_cuda_stats.txt
+          cat reports/tests_torch_lora_cuda_stats.txt
           cat reports/tests_torch_lora_cuda_failures_short.txt

       - name: Test suite reports artifacts
@@ -239,12 +239,12 @@ jobs:
         run: |
           pip install slack_sdk tabulate
           python scripts/log_reports.py >> $GITHUB_STEP_SUMMARY
-
+
   run_flax_tpu_tests:
     name: Nightly Flax TPU Tests
    runs-on: docker-tpu
     if: github.event_name == 'schedule'
-
+
     container:
       image: diffusers/diffusers-flax-tpu
       options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/ --privileged
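Aside on the hunk above: the `if: github.event_name == 'schedule'` guard means the Flax TPU job only runs when the workflow is started by a cron trigger, which is what makes these tests nightly. A minimal sketch of the top-level trigger block such a workflow would declare; the cron time and the `workflow_dispatch` entry are assumptions, not shown in this excerpt:

```yaml
# Hypothetical trigger block for a nightly workflow; the schedule shown is illustrative.
on:
  schedule:
    - cron: "0 0 * * *"    # once per day at midnight UTC (assumed)
  workflow_dispatch:        # allow manual runs for debugging (assumed)
```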
@@ -274,7 +274,7 @@ jobs:
           python -m pytest -n 0 \
             -s -v -k "Flax" \
             --make-reports=tests_flax_tpu \
-            --report-log=tests_flax_tpu.log \
+            --report-log=tests_flax_tpu.log \
             tests/

       - name: Failure short reports
@@ -302,7 +302,7 @@ jobs:
     container:
       image: diffusers/diffusers-onnxruntime-cuda
       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/
-
+
     steps:
       - name: Checkout diffusers
         uses: actions/checkout@v3
@@ -321,15 +321,15 @@ jobs:

       - name: Environment
         run: python utils/print_env.py
-
+
       - name: Run nightly ONNXRuntime CUDA tests
         env:
           HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
         run: |
           python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
             -s -v -k "Onnx" \
             --make-reports=tests_onnx_cuda \
-            --report-log=tests_onnx_cuda.log \
+            --report-log=tests_onnx_cuda.log \
             tests/

       - name: Failure short reports
@@ -344,7 +344,7 @@ jobs:
         with:
           name: ${{ matrix.config.report }}_test_reports
           path: reports
-
+
       - name: Generate Report and Notify Channel
         if: always()
         run: |