Commit ee52734

Merge branch 'main' into fix-cosmos-attn
2 parents fff9e31 + 76a62ac

354 files changed: +28939 additions, -13387 deletions

.github/workflows/benchmark.yml

Lines changed: 31 additions & 10 deletions
@@ -11,17 +11,18 @@ env:
   HF_HOME: /mnt/cache
   OMP_NUM_THREADS: 8
   MKL_NUM_THREADS: 8
+  BASE_PATH: benchmark_outputs

 jobs:
-  torch_pipelines_cuda_benchmark_tests:
+  torch_models_cuda_benchmark_tests:
     env:
       SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_BENCHMARK }}
-    name: Torch Core Pipelines CUDA Benchmarking Tests
+    name: Torch Core Models CUDA Benchmarking Tests
     strategy:
       fail-fast: false
       max-parallel: 1
     runs-on:
-      group: aws-g6-4xlarge-plus
+      group: aws-g6e-4xlarge
     container:
       image: diffusers/diffusers-pytorch-cuda
       options: --shm-size "16gb" --ipc host --gpus 0
@@ -35,27 +36,47 @@ jobs:
           nvidia-smi
       - name: Install dependencies
         run: |
+          apt update
+          apt install -y libpq-dev postgresql-client
           python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
           python -m uv pip install -e [quality,test]
-          python -m uv pip install pandas peft
-          python -m uv pip uninstall transformers && python -m uv pip install transformers==4.48.0
+          python -m uv pip install -r benchmarks/requirements.txt
       - name: Environment
         run: |
           python utils/print_env.py
       - name: Diffusers Benchmarking
         env:
-          HF_TOKEN: ${{ secrets.DIFFUSERS_BOT_TOKEN }}
-          BASE_PATH: benchmark_outputs
+          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
         run: |
-          export TOTAL_GPU_MEMORY=$(python -c "import torch; print(torch.cuda.get_device_properties(0).total_memory / (1024**3))")
-          cd benchmarks && mkdir ${BASE_PATH} && python run_all.py && python push_results.py
+          cd benchmarks && python run_all.py
+
+      - name: Push results to the Hub
+        env:
+          HF_TOKEN: ${{ secrets.DIFFUSERS_BOT_TOKEN }}
+        run: |
+          cd benchmarks && python push_results.py
+          mkdir $BASE_PATH && cp *.csv $BASE_PATH

       - name: Test suite reports artifacts
         if: ${{ always() }}
         uses: actions/upload-artifact@v4
         with:
           name: benchmark_test_reports
-          path: benchmarks/benchmark_outputs
+          path: benchmarks/${{ env.BASE_PATH }}
+
+      # TODO: enable this once the connection problem has been resolved.
+      - name: Update benchmarking results to DB
+        env:
+          PGDATABASE: metrics
+          PGHOST: ${{ secrets.DIFFUSERS_BENCHMARKS_PGHOST }}
+          PGUSER: transformers_benchmarks
+          PGPASSWORD: ${{ secrets.DIFFUSERS_BENCHMARKS_PGPASSWORD }}
+          BRANCH_NAME: ${{ github.head_ref || github.ref_name }}
+        run: |
+          git config --global --add safe.directory /__w/diffusers/diffusers
+          commit_id=$GITHUB_SHA
+          commit_msg=$(git show -s --format=%s "$commit_id" | cut -c1-70)
+          cd benchmarks && python populate_into_db.py "$BRANCH_NAME" "$commit_id" "$commit_msg"

       - name: Report success status
         if: ${{ success() }}
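
Note on the restructured benchmark job: running the benchmarks, pushing results to the Hub, and (eventually) writing them to a database are now separate steps, and the output directory is declared once as a workflow-level BASE_PATH environment variable. Workflow-level env values are visible both to step shells (as $BASE_PATH) and to workflow expressions (as ${{ env.BASE_PATH }}), which is what the new artifact path relies on. A minimal sketch of that pattern, with illustrative workflow, job, and step names that are not taken from the real file:

    # Sketch only: workflow-level env reuse, not the actual benchmark workflow.
    name: env-scope-sketch
    on: workflow_dispatch
    env:
      BASE_PATH: benchmark_outputs        # declared once for the whole workflow
    jobs:
      demo:
        runs-on: ubuntu-latest
        steps:
          - name: Write results
            run: |
              # in a shell step the value is an ordinary environment variable
              mkdir -p "$BASE_PATH" && echo "ok" > "$BASE_PATH/result.csv"
          - name: Upload results
            uses: actions/upload-artifact@v4
            with:
              name: benchmark_test_reports
              # in expressions the same value is read through the env context
              path: ${{ env.BASE_PATH }}

The disabled "Update benchmarking results to DB" step follows the same idea for its PG* variables: libpq-based clients read PGHOST, PGUSER, PGPASSWORD, and PGDATABASE from the environment, so populate_into_db.py presumably needs no explicit connection arguments.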

.github/workflows/build_docker_images.yml

Lines changed: 0 additions & 4 deletions
@@ -75,10 +75,6 @@ jobs:
           - diffusers-pytorch-cuda
           - diffusers-pytorch-xformers-cuda
           - diffusers-pytorch-minimum-cuda
-          - diffusers-flax-cpu
-          - diffusers-flax-tpu
-          - diffusers-onnxruntime-cpu
-          - diffusers-onnxruntime-cuda
           - diffusers-doc-builder

     steps:
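
The removed entries are the Flax and ONNX Runtime images. Judging by the surrounding list, these names are values of a build matrix, so dropping them simply stops those images from being built and pushed. A minimal sketch of that fan-out, assuming a matrix key named image-name and a docker/<image-name>/Dockerfile layout (both are assumptions about this workflow, not verified from it):

    # Sketch only: one build job per image name in the matrix.
    jobs:
      build-images:
        strategy:
          fail-fast: false
          matrix:
            image-name:
              - diffusers-pytorch-cpu
              - diffusers-pytorch-cuda
              - diffusers-doc-builder
        runs-on: ubuntu-latest
        steps:
          - uses: actions/checkout@v3
          - name: Build image
            run: docker build -f docker/${{ matrix.image-name }}/Dockerfile -t ${{ matrix.image-name }} .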

.github/workflows/nightly_tests.yml

Lines changed: 3 additions & 103 deletions
@@ -248,7 +248,7 @@ jobs:
           BIG_GPU_MEMORY: 40
         run: |
           python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
-            -m "big_gpu_with_torch_cuda" \
+            -m "big_accelerator" \
             --make-reports=tests_big_gpu_torch_cuda \
             --report-log=tests_big_gpu_torch_cuda.log \
             tests/
@@ -321,55 +321,6 @@ jobs:
           name: torch_minimum_version_cuda_test_reports
           path: reports

-  run_nightly_onnx_tests:
-    name: Nightly ONNXRuntime CUDA tests on Ubuntu
-    runs-on:
-      group: aws-g4dn-2xlarge
-    container:
-      image: diffusers/diffusers-onnxruntime-cuda
-      options: --gpus 0 --shm-size "16gb" --ipc host
-
-    steps:
-      - name: Checkout diffusers
-        uses: actions/checkout@v3
-        with:
-          fetch-depth: 2
-
-      - name: NVIDIA-SMI
-        run: nvidia-smi
-
-      - name: Install dependencies
-        run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m uv pip install -e [quality,test]
-          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
-          python -m uv pip install pytest-reportlog
-      - name: Environment
-        run: python utils/print_env.py
-
-      - name: Run Nightly ONNXRuntime CUDA tests
-        env:
-          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
-        run: |
-          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
-            -s -v -k "Onnx" \
-            --make-reports=tests_onnx_cuda \
-            --report-log=tests_onnx_cuda.log \
-            tests/
-
-      - name: Failure short reports
-        if: ${{ failure() }}
-        run: |
-          cat reports/tests_onnx_cuda_stats.txt
-          cat reports/tests_onnx_cuda_failures_short.txt
-
-      - name: Test suite reports artifacts
-        if: ${{ always() }}
-        uses: actions/upload-artifact@v4
-        with:
-          name: tests_onnx_cuda_reports
-          path: reports
-
   run_nightly_quantization_tests:
     name: Torch quantization nightly tests
     strategy:
@@ -485,57 +436,6 @@ jobs:
           name: torch_cuda_pipeline_level_quant_reports
           path: reports

-  run_flax_tpu_tests:
-    name: Nightly Flax TPU Tests
-    runs-on:
-      group: gcp-ct5lp-hightpu-8t
-    if: github.event_name == 'schedule'
-
-    container:
-      image: diffusers/diffusers-flax-tpu
-      options: --shm-size "16gb" --ipc host --privileged ${{ vars.V5_LITEPOD_8_ENV}} -v /mnt/hf_cache:/mnt/hf_cache
-    defaults:
-      run:
-        shell: bash
-    steps:
-      - name: Checkout diffusers
-        uses: actions/checkout@v3
-        with:
-          fetch-depth: 2
-
-      - name: Install dependencies
-        run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m uv pip install -e [quality,test]
-          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
-          python -m uv pip install pytest-reportlog
-
-      - name: Environment
-        run: python utils/print_env.py
-
-      - name: Run nightly Flax TPU tests
-        env:
-          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
-        run: |
-          python -m pytest -n 0 \
-            -s -v -k "Flax" \
-            --make-reports=tests_flax_tpu \
-            --report-log=tests_flax_tpu.log \
-            tests/
-
-      - name: Failure short reports
-        if: ${{ failure() }}
-        run: |
-          cat reports/tests_flax_tpu_stats.txt
-          cat reports/tests_flax_tpu_failures_short.txt
-
-      - name: Test suite reports artifacts
-        if: ${{ always() }}
-        uses: actions/upload-artifact@v4
-        with:
-          name: flax_tpu_test_reports
-          path: reports
-
   generate_consolidated_report:
     name: Generate Consolidated Test Report
     needs: [
@@ -545,9 +445,9 @@ jobs:
       run_big_gpu_torch_tests,
       run_nightly_quantization_tests,
       run_nightly_pipeline_level_quantization_tests,
-      run_nightly_onnx_tests,
+      # run_nightly_onnx_tests,
       torch_minimum_version_cuda_tests,
-      run_flax_tpu_tests
+      # run_flax_tpu_tests
     ]
     if: always()
     runs-on:
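
Two changes to note here. First, the big-GPU suite is now selected with the big_accelerator pytest marker instead of big_gpu_with_torch_cuda; assuming the standard pytest mechanism, tests carry the marker (e.g. @pytest.mark.big_accelerator) and -m "big_accelerator" selects exactly those. Second, the removed ONNX and Flax jobs are commented out of, rather than deleted from, the consolidated report's needs: list, since needs: may only reference job IDs that still exist in the workflow. A minimal sketch of that aggregation pattern, with illustrative job names:

    # Sketch only: a report job that waits on several test jobs and runs even if they fail.
    jobs:
      tests_a:
        runs-on: ubuntu-latest
        steps:
          - run: echo "suite A"
      tests_b:
        runs-on: ubuntu-latest
        steps:
          - run: echo "suite B"
      report:
        needs: [
          tests_a,
          tests_b,
          # tests_c,  # disabled job: leaving it uncommented would reference a job that no longer exists
        ]
        if: always()   # produce the report even when a needed job failed or was skipped
        runs-on: ubuntu-latest
        steps:
          - run: echo "aggregate the reports here"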

.github/workflows/pr_tests.yml

Lines changed: 0 additions & 14 deletions
@@ -87,11 +87,6 @@ jobs:
             runner: aws-general-8-plus
             image: diffusers/diffusers-pytorch-cpu
             report: torch_cpu_models_schedulers
-          - name: Fast Flax CPU tests
-            framework: flax
-            runner: aws-general-8-plus
-            image: diffusers/diffusers-flax-cpu
-            report: flax_cpu
           - name: PyTorch Example CPU tests
             framework: pytorch_examples
             runner: aws-general-8-plus
@@ -147,15 +142,6 @@ jobs:
             --make-reports=tests_${{ matrix.config.report }} \
             tests/models tests/schedulers tests/others

-      - name: Run fast Flax TPU tests
-        if: ${{ matrix.config.framework == 'flax' }}
-        run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
-            -s -v -k "Flax" \
-            --make-reports=tests_${{ matrix.config.report }} \
-            tests
-
       - name: Run example PyTorch CPU tests
         if: ${{ matrix.config.framework == 'pytorch_examples' }}
         run: |
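
The Flax CPU entry disappears from the test matrix together with the step that was gated on matrix.config.framework == 'flax': once no matrix entry produces that framework value, the gated step can never run, so both go at once. A minimal sketch of the matrix-plus-conditional-step pattern, with illustrative entries and paths (the --make-reports flag mirrors the repo-specific option seen in the diff above):

    # Sketch only: matrix entries decide which conditional steps actually execute.
    jobs:
      fast_tests:
        strategy:
          fail-fast: false
          matrix:
            config:
              - name: Fast PyTorch CPU tests
                framework: pytorch
                report: torch_cpu
              - name: PyTorch Example CPU tests
                framework: pytorch_examples
                report: torch_example_cpu
        runs-on: ubuntu-latest
        steps:
          - uses: actions/checkout@v3
          - name: Run PyTorch tests
            if: ${{ matrix.config.framework == 'pytorch' }}
            run: python -m pytest --make-reports=tests_${{ matrix.config.report }} tests/models
          - name: Run example tests
            if: ${{ matrix.config.framework == 'pytorch_examples' }}
            run: python -m pytest --make-reports=tests_${{ matrix.config.report }} examples/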

.github/workflows/pr_tests_gpu.yml

Lines changed: 1 addition & 1 deletion
@@ -188,7 +188,7 @@ jobs:
         shell: bash
     strategy:
       fail-fast: false
-      max-parallel: 2
+      max-parallel: 4
       matrix:
         module: [models, schedulers, lora, others]
     steps:
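
strategy.max-parallel caps how many matrix jobs run at the same time. With the four-entry module matrix shown above, raising the cap from 2 to 4 lets all four module jobs run concurrently instead of in two waves, at the price of holding more GPU runners at once. A minimal sketch of the setting in isolation (the runner label here is illustrative; the real job targets GPU runners):

    # Sketch only: max-parallel limits how many matrix jobs run concurrently.
    jobs:
      gpu_tests:
        strategy:
          fail-fast: false
          max-parallel: 4        # was 2: at most two of the four module jobs at a time
          matrix:
            module: [models, schedulers, lora, others]
        runs-on: ubuntu-latest
        steps:
          - run: echo "running ${{ matrix.module }} tests"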

.github/workflows/push_tests.yml

Lines changed: 0 additions & 96 deletions
@@ -159,102 +159,6 @@ jobs:
           name: torch_cuda_test_reports_${{ matrix.module }}
           path: reports

-  flax_tpu_tests:
-    name: Flax TPU Tests
-    runs-on:
-      group: gcp-ct5lp-hightpu-8t
-    container:
-      image: diffusers/diffusers-flax-tpu
-      options: --shm-size "16gb" --ipc host --privileged ${{ vars.V5_LITEPOD_8_ENV}} -v /mnt/hf_cache:/mnt/hf_cache
-    defaults:
-      run:
-        shell: bash
-    steps:
-      - name: Checkout diffusers
-        uses: actions/checkout@v3
-        with:
-          fetch-depth: 2
-
-      - name: Install dependencies
-        run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m uv pip install -e [quality,test]
-          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
-
-      - name: Environment
-        run: |
-          python utils/print_env.py
-
-      - name: Run Flax TPU tests
-        env:
-          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
-        run: |
-          python -m pytest -n 0 \
-            -s -v -k "Flax" \
-            --make-reports=tests_flax_tpu \
-            tests/
-
-      - name: Failure short reports
-        if: ${{ failure() }}
-        run: |
-          cat reports/tests_flax_tpu_stats.txt
-          cat reports/tests_flax_tpu_failures_short.txt
-
-      - name: Test suite reports artifacts
-        if: ${{ always() }}
-        uses: actions/upload-artifact@v4
-        with:
-          name: flax_tpu_test_reports
-          path: reports
-
-  onnx_cuda_tests:
-    name: ONNX CUDA Tests
-    runs-on:
-      group: aws-g4dn-2xlarge
-    container:
-      image: diffusers/diffusers-onnxruntime-cuda
-      options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ --gpus 0
-    defaults:
-      run:
-        shell: bash
-    steps:
-      - name: Checkout diffusers
-        uses: actions/checkout@v3
-        with:
-          fetch-depth: 2
-
-      - name: Install dependencies
-        run: |
-          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
-          python -m uv pip install -e [quality,test]
-          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
-
-      - name: Environment
-        run: |
-          python utils/print_env.py
-
-      - name: Run ONNXRuntime CUDA tests
-        env:
-          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
-        run: |
-          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
-            -s -v -k "Onnx" \
-            --make-reports=tests_onnx_cuda \
-            tests/
-
-      - name: Failure short reports
-        if: ${{ failure() }}
-        run: |
-          cat reports/tests_onnx_cuda_stats.txt
-          cat reports/tests_onnx_cuda_failures_short.txt
-
-      - name: Test suite reports artifacts
-        if: ${{ always() }}
-        uses: actions/upload-artifact@v4
-        with:
-          name: onnx_cuda_test_reports
-          path: reports
-
   run_torch_compile_tests:
     name: PyTorch Compile CUDA tests
