 
 env:
   DIFFUSERS_IS_CI: yes
-  HF_HOME: /mnt/cache
+  HF_HUB_ENABLE_HF_TRANSFER: 1
   OMP_NUM_THREADS: 8
   MKL_NUM_THREADS: 8
   PYTEST_TIMEOUT: 600
 
 jobs:
   setup_torch_cuda_pipeline_matrix:
-    name: Setup Torch Pipelines Matrix
-    runs-on: diffusers/diffusers-pytorch-cpu
+    name: Setup Torch Pipelines CUDA Slow Tests Matrix
+    runs-on:
+      group: aws-general-8-plus
+    container:
+      image: diffusers/diffusers-pytorch-cpu
     outputs:
       pipeline_test_matrix: ${{ steps.fetch_pipeline_matrix.outputs.pipeline_test_matrix }}
     steps:
       - name: Checkout diffusers
         uses: actions/checkout@v3
         with:
           fetch-depth: 2
-      - name: Set up Python
-        uses: actions/setup-python@v4
-        with:
-          python-version: "3.8"
       - name: Install dependencies
         run: |
-          pip install -e .
+          pip install -e .[test]
           pip install huggingface_hub
       - name: Fetch Pipeline Matrix
         id: fetch_pipeline_matrix
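
Note: the run: body of the Fetch Pipeline Matrix step continues past this hunk. As a point of reference, here is a minimal sketch of how a setup job like this typically publishes a JSON matrix for the fromJson() consumer further down; the helper script name and its output format are assumptions, not taken from this diff:

      - name: Fetch Pipeline Matrix
        id: fetch_pipeline_matrix
        run: |
          # Assumed helper: prints a JSON array of pipeline module names, e.g. ["stable_diffusion", "controlnet"].
          matrix=$(python utils/fetch_pipeline_matrix.py)
          # Writing to $GITHUB_OUTPUT exposes the value as
          # steps.fetch_pipeline_matrix.outputs.pipeline_test_matrix, which the job-level outputs block re-exports.
          echo "pipeline_test_matrix=$matrix" >> $GITHUB_OUTPUT
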
@@ -50,36 +49,35 @@ jobs:
           path: reports
 
   run_nightly_tests_for_torch_pipelines:
-    name: Torch Pipelines CUDA Nightly Tests
+    name: Nightly Torch Pipelines CUDA Tests
     needs: setup_torch_cuda_pipeline_matrix
     strategy:
       fail-fast: false
+      max-parallel: 8
       matrix:
         module: ${{ fromJson(needs.setup_torch_cuda_pipeline_matrix.outputs.pipeline_test_matrix) }}
-    runs-on: [single-gpu, nvidia-gpu, t4, ci]
+    runs-on:
+      group: aws-g4dn-2xlarge
     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface/diffusers:/mnt/cache/ --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus 0
     steps:
       - name: Checkout diffusers
         uses: actions/checkout@v3
         with:
           fetch-depth: 2
       - name: NVIDIA-SMI
         run: nvidia-smi
-
       - name: Install dependencies
         run: |
           python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
           python -m uv pip install -e [quality,test]
           python -m uv pip install accelerate@git+https://github.com/huggingface/accelerate.git
           python -m uv pip install pytest-reportlog
-
       - name: Environment
         run: |
           python utils/print_env.py
-
-      - name: Nightly PyTorch CUDA checkpoint (pipelines) tests
+      - name: Pipeline CUDA Test
         env:
           HF_TOKEN: ${{ secrets.HF_TOKEN }}
           # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
@@ -90,38 +88,37 @@ jobs:
             --make-reports=tests_pipeline_${{ matrix.module }}_cuda \
             --report-log=tests_pipeline_${{ matrix.module }}_cuda.log \
             tests/pipelines/${{ matrix.module }}
-
       - name: Failure short reports
         if: ${{ failure() }}
         run: |
           cat reports/tests_pipeline_${{ matrix.module }}_cuda_stats.txt
           cat reports/tests_pipeline_${{ matrix.module }}_cuda_failures_short.txt
-
       - name: Test suite reports artifacts
         if: ${{ always() }}
         uses: actions/upload-artifact@v2
         with:
           name: pipeline_${{ matrix.module }}_test_reports
           path: reports
-
       - name: Generate Report and Notify Channel
         if: always()
         run: |
           pip install slack_sdk tabulate
-          python scripts/log_reports.py >> $GITHUB_STEP_SUMMARY
+          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
 
   run_nightly_tests_for_other_torch_modules:
-    name: Torch Non-Pipelines CUDA Nightly Tests
-    runs-on: [single-gpu, nvidia-gpu, t4, ci]
+    name: Nightly Torch CUDA Tests
+    runs-on:
+      group: aws-g4dn-2xlarge
     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/ --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus 0
     defaults:
       run:
         shell: bash
     strategy:
+      max-parallel: 2
       matrix:
-        module: [models, schedulers, others, examples]
+        module: [models, schedulers, lora, others, single_file, examples]
     steps:
       - name: Checkout diffusers
         uses: actions/checkout@v3
@@ -133,8 +130,8 @@ jobs:
           python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
           python -m uv pip install -e [quality,test]
           python -m uv pip install accelerate@git+https://github.com/huggingface/accelerate.git
+          python -m uv pip install peft@git+https://github.com/huggingface/peft.git
           python -m uv pip install pytest-reportlog
-
       - name: Environment
         run: python utils/print_env.py
 
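
The per-module test steps of this job sit outside the hunks shown here. Purely as an illustration of how a matrix entry such as lora or single_file is typically consumed (the step name, condition, and paths below are assumptions, not the file's actual steps):

      - name: Run nightly PyTorch CUDA tests for non-pipeline modules
        if: ${{ matrix.module != 'examples' }}
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
        run: |
          # Each matrix entry maps onto a test directory, e.g. tests/lora or tests/single_file.
          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
            -s -v --make-reports=tests_torch_${{ matrix.module }}_cuda \
            --report-log=tests_torch_${{ matrix.module }}_cuda.log \
            tests/${{ matrix.module }}
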
@@ -158,7 +155,6 @@ jobs:
           # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
           CUBLAS_WORKSPACE_CONFIG: :16:8
         run: |
-          python -m uv pip install peft@git+https://github.com/huggingface/peft.git
           python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
             -s -v --make-reports=examples_torch_cuda \
             --report-log=examples_torch_cuda.log \
@@ -181,64 +177,7 @@ jobs:
         if: always()
         run: |
           pip install slack_sdk tabulate
-          python scripts/log_reports.py >> $GITHUB_STEP_SUMMARY
-
-  run_lora_nightly_tests:
-    name: Nightly LoRA Tests with PEFT and TORCH
-    runs-on: [single-gpu, nvidia-gpu, t4, ci]
-    container:
-      image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/ --gpus 0
-    defaults:
-      run:
-        shell: bash
-    steps:
-      - name: Checkout diffusers
-        uses: actions/checkout@v3
-        with:
-          fetch-depth: 2
-
-      - name: Install dependencies
-        run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m uv pip install -e [quality,test]
-          python -m uv pip install accelerate@git+https://github.com/huggingface/accelerate.git
-          python -m uv pip install peft@git+https://github.com/huggingface/peft.git
-          python -m uv pip install pytest-reportlog
-
-      - name: Environment
-        run: python utils/print_env.py
-
-      - name: Run nightly LoRA tests with PEFT and Torch
-        env:
-          HF_TOKEN: ${{ secrets.HF_TOKEN }}
-          # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
-          CUBLAS_WORKSPACE_CONFIG: :16:8
-        run: |
-          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
-            -s -v -k "not Flax and not Onnx" \
-            --make-reports=tests_torch_lora_cuda \
-            --report-log=tests_torch_lora_cuda.log \
-            tests/lora
-
-      - name: Failure short reports
-        if: ${{ failure() }}
-        run: |
-          cat reports/tests_torch_lora_cuda_stats.txt
-          cat reports/tests_torch_lora_cuda_failures_short.txt
-
-      - name: Test suite reports artifacts
-        if: ${{ always() }}
-        uses: actions/upload-artifact@v2
-        with:
-          name: torch_lora_cuda_test_reports
-          path: reports
-
-      - name: Generate Report and Notify Channel
-        if: always()
-        run: |
-          pip install slack_sdk tabulate
-          python scripts/log_reports.py >> $GITHUB_STEP_SUMMARY
+          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
 
   run_flax_tpu_tests:
     name: Nightly Flax TPU Tests
@@ -294,14 +233,15 @@ jobs:
         if: always()
         run: |
           pip install slack_sdk tabulate
-          python scripts/log_reports.py >> $GITHUB_STEP_SUMMARY
+          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
 
   run_nightly_onnx_tests:
     name: Nightly ONNXRuntime CUDA tests on Ubuntu
-    runs-on: [single-gpu, nvidia-gpu, t4, ci]
+    runs-on:
+      group: aws-g4dn-2xlarge
     container:
       image: diffusers/diffusers-onnxruntime-cuda
-      options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/
+      options: --gpus 0 --shm-size "16gb" --ipc host
 
     steps:
       - name: Checkout diffusers
@@ -318,11 +258,10 @@ jobs:
           python -m uv pip install -e [quality,test]
           python -m uv pip install accelerate@git+https://github.com/huggingface/accelerate.git
           python -m uv pip install pytest-reportlog
-
       - name: Environment
         run: python utils/print_env.py
 
-      - name: Run nightly ONNXRuntime CUDA tests
+      - name: Run Nightly ONNXRuntime CUDA tests
         env:
           HF_TOKEN: ${{ secrets.HF_TOKEN }}
         run: |
@@ -349,7 +288,7 @@ jobs:
         if: always()
         run: |
           pip install slack_sdk tabulate
-          python scripts/log_reports.py >> $GITHUB_STEP_SUMMARY
+          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
 
   run_nightly_tests_apple_m1:
     name: Nightly PyTorch MPS tests on MacOS
@@ -411,4 +350,4 @@ jobs:
         if: always()
         run: |
           pip install slack_sdk tabulate
-          python scripts/log_reports.py >> $GITHUB_STEP_SUMMARY
+          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
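
Every job above ends with the same report/notify step. For reference, here is that shared pattern with its moving parts annotated; the internals of utils/log_reports.py are not shown in this diff, so the comments describe the presumed flow:

      - name: Generate Report and Notify Channel
        if: always()
        run: |
          # tabulate renders the aggregated results as a table; slack_sdk posts the notification to Slack.
          pip install slack_sdk tabulate
          # Presumably reads the pytest-reportlog *.log files written by the test steps and appends a
          # Markdown summary to the workflow run page via the $GITHUB_STEP_SUMMARY file.
          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY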