Skip to content

Commit 1203f44

Browse files
committed
Merge branch 'main' into integrations/cosmos
2 parents 59d7793 + 26149c0 commit 1203f44

File tree

392 files changed

+32654
-4197
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

392 files changed

+32654
-4197
lines changed
Lines changed: 38 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,38 @@
1+
name: "\U0001F31F Remote VAE"
2+
description: Feedback for remote VAE pilot
3+
labels: [ "Remote VAE" ]
4+
5+
body:
6+
- type: textarea
7+
id: positive
8+
validations:
9+
required: true
10+
attributes:
11+
label: Did you like the remote VAE solution?
12+
description: |
13+
If you liked it, we would appreciate it if you could elaborate what you liked.
14+
15+
- type: textarea
16+
id: feedback
17+
validations:
18+
required: true
19+
attributes:
20+
label: What can be improved about the current solution?
21+
description: |
22+
Let us know the things you would like to see improved. Note that we will work optimizing the solution once the pilot is over and we have usage.
23+
24+
- type: textarea
25+
id: others
26+
validations:
27+
required: true
28+
attributes:
29+
label: What other VAEs you would like to see if the pilot goes well?
30+
description: |
31+
Provide a list of the VAEs you would like to see in the future if the pilot goes well.
32+
33+
- type: textarea
34+
id: additional-info
35+
attributes:
36+
label: Notify the members of the team
37+
description: |
38+
Tag the following folks when submitting this feedback: @hlky @sayakpaul

.github/workflows/nightly_tests.yml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -418,6 +418,8 @@ jobs:
418418
test_location: "gguf"
419419
- backend: "torchao"
420420
test_location: "torchao"
421+
- backend: "optimum_quanto"
422+
test_location: "quanto"
421423
runs-on:
422424
group: aws-g6e-xlarge-plus
423425
container:

.github/workflows/pr_style_bot.yml

Lines changed: 51 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,51 @@
1+
name: PR Style Bot
2+
3+
on:
4+
issue_comment:
5+
types: [created]
6+
7+
permissions:
8+
contents: write
9+
pull-requests: write
10+
11+
jobs:
12+
style:
13+
uses: huggingface/huggingface_hub/.github/workflows/style-bot-action.yml@main
14+
with:
15+
python_quality_dependencies: "[quality]"
16+
pre_commit_script_name: "Download and Compare files from the main branch"
17+
pre_commit_script: |
18+
echo "Downloading the files from the main branch"
19+
20+
curl -o main_Makefile https://raw.githubusercontent.com/huggingface/diffusers/main/Makefile
21+
curl -o main_setup.py https://raw.githubusercontent.com/huggingface/diffusers/refs/heads/main/setup.py
22+
curl -o main_check_doc_toc.py https://raw.githubusercontent.com/huggingface/diffusers/refs/heads/main/utils/check_doc_toc.py
23+
24+
echo "Compare the files and raise error if needed"
25+
26+
diff_failed=0
27+
if ! diff -q main_Makefile Makefile; then
28+
echo "Error: The Makefile has changed. Please ensure it matches the main branch."
29+
diff_failed=1
30+
fi
31+
32+
if ! diff -q main_setup.py setup.py; then
33+
echo "Error: The setup.py has changed. Please ensure it matches the main branch."
34+
diff_failed=1
35+
fi
36+
37+
if ! diff -q main_check_doc_toc.py utils/check_doc_toc.py; then
38+
echo "Error: The utils/check_doc_toc.py has changed. Please ensure it matches the main branch."
39+
diff_failed=1
40+
fi
41+
42+
if [ $diff_failed -eq 1 ]; then
43+
echo "❌ Error happened as we detected changes in the files that should not be changed ❌"
44+
exit 1
45+
fi
46+
47+
echo "No changes in the files. Proceeding..."
48+
rm -rf main_Makefile main_setup.py main_check_doc_toc.py
49+
style_command: "make style && make quality"
50+
secrets:
51+
bot_token: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/pr_tests.yml

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2,8 +2,7 @@ name: Fast tests for PRs
22

33
on:
44
pull_request:
5-
branches:
6-
- main
5+
branches: [main]
76
paths:
87
- "src/diffusers/**.py"
98
- "benchmarks/**.py"
@@ -64,6 +63,7 @@ jobs:
6463
run: |
6564
python utils/check_copies.py
6665
python utils/check_dummies.py
66+
python utils/check_support_list.py
6767
make deps_table_check_updated
6868
- name: Check if failure
6969
if: ${{ failure() }}
@@ -120,7 +120,8 @@ jobs:
120120
run: |
121121
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
122122
python -m uv pip install -e [quality,test]
123-
python -m uv pip install accelerate
123+
pip uninstall transformers -y && python -m uv pip install -U transformers@git+https://github.com/huggingface/transformers.git --no-deps
124+
pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git --no-deps
124125
125126
- name: Environment
126127
run: |

.github/workflows/pr_tests_gpu.yml

Lines changed: 250 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,250 @@
# Fast GPU tests that run on PRs touching core model/pipeline loading and LoRA
# code paths, plus a manual trigger. Jobs: build a pipeline test matrix on CPU,
# then run pipeline / module / example test suites on CUDA runners.
name: Fast GPU Tests on PR

on:
  pull_request:
    # List form for consistency with the other workflows in this repo.
    branches: [main]
    paths:
      - "src/diffusers/models/modeling_utils.py"
      - "src/diffusers/models/model_loading_utils.py"
      - "src/diffusers/pipelines/pipeline_utils.py"
      - "src/diffusers/pipeline_loading_utils.py"
      - "src/diffusers/loaders/lora_base.py"
      - "src/diffusers/loaders/lora_pipeline.py"
      - "src/diffusers/loaders/peft.py"
      - "tests/pipelines/test_pipelines_common.py"
      - "tests/models/test_modeling_common.py"
  workflow_dispatch:

# Cancel in-flight runs for the same PR when new commits arrive.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

env:
  DIFFUSERS_IS_CI: yes  # NOTE(review): unquoted `yes` is YAML-truthy; confirm consumers expect the resulting value
  OMP_NUM_THREADS: 8
  MKL_NUM_THREADS: 8
  HF_HUB_ENABLE_HF_TRANSFER: 1
  PYTEST_TIMEOUT: 600
  PIPELINE_USAGE_CUTOFF: 1000000000 # set high cutoff so that only always-test pipelines run

jobs:
  # Builds the JSON matrix of pipeline modules consumed by torch_pipelines_cuda_tests.
  setup_torch_cuda_pipeline_matrix:
    name: Setup Torch Pipelines CUDA Slow Tests Matrix
    runs-on:
      group: aws-general-8-plus
    container:
      image: diffusers/diffusers-pytorch-cpu
    outputs:
      pipeline_test_matrix: ${{ steps.fetch_pipeline_matrix.outputs.pipeline_test_matrix }}
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2
      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e [quality,test]
      - name: Environment
        run: |
          python utils/print_env.py
      - name: Fetch Pipeline Matrix
        id: fetch_pipeline_matrix
        run: |
          matrix=$(python utils/fetch_torch_cuda_pipeline_test_matrix.py)
          echo $matrix
          echo "pipeline_test_matrix=$matrix" >> $GITHUB_OUTPUT
      - name: Pipeline Tests Artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: test-pipelines.json
          path: reports

  # Runs each pipeline module from the matrix on a CUDA runner, restricted to
  # the tests extracted from the common mixins (except ip_adapters, which runs fully).
  torch_pipelines_cuda_tests:
    name: Torch Pipelines CUDA Tests
    needs: setup_torch_cuda_pipeline_matrix
    strategy:
      fail-fast: false
      max-parallel: 8
      matrix:
        module: ${{ fromJson(needs.setup_torch_cuda_pipeline_matrix.outputs.pipeline_test_matrix) }}
    runs-on:
      group: aws-g4dn-2xlarge
    container:
      image: diffusers/diffusers-pytorch-cuda
      options: --shm-size "16gb" --ipc host --gpus 0
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: NVIDIA-SMI
        run: |
          nvidia-smi
      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e [quality,test]
          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
          pip uninstall transformers -y && python -m uv pip install -U transformers@git+https://github.com/huggingface/transformers.git --no-deps

      - name: Environment
        run: |
          python utils/print_env.py
      - name: Extract tests
        id: extract_tests
        run: |
          pattern=$(python utils/extract_tests_from_mixin.py --type pipeline)
          echo "$pattern" > /tmp/test_pattern.txt
          echo "pattern_file=/tmp/test_pattern.txt" >> $GITHUB_OUTPUT

      - name: PyTorch CUDA checkpoint tests on Ubuntu
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
          CUBLAS_WORKSPACE_CONFIG: :16:8
        run: |
          if [ "${{ matrix.module }}" = "ip_adapters" ]; then
            python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
              -s -v -k "not Flax and not Onnx" \
              --make-reports=tests_pipeline_${{ matrix.module }}_cuda \
              tests/pipelines/${{ matrix.module }}
          else
            pattern=$(cat ${{ steps.extract_tests.outputs.pattern_file }})
            python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
              -s -v -k "not Flax and not Onnx and $pattern" \
              --make-reports=tests_pipeline_${{ matrix.module }}_cuda \
              tests/pipelines/${{ matrix.module }}
          fi

      - name: Failure short reports
        if: ${{ failure() }}
        run: |
          cat reports/tests_pipeline_${{ matrix.module }}_cuda_stats.txt
          cat reports/tests_pipeline_${{ matrix.module }}_cuda_failures_short.txt
      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: pipeline_${{ matrix.module }}_test_reports
          path: reports

  # Non-pipeline suites (models/schedulers/lora/others) on CUDA; when the mixin
  # extractor yields no pattern for a module, the full suite runs instead.
  torch_cuda_tests:
    name: Torch CUDA Tests
    runs-on:
      group: aws-g4dn-2xlarge
    container:
      image: diffusers/diffusers-pytorch-cuda
      options: --shm-size "16gb" --ipc host --gpus 0
    defaults:
      run:
        shell: bash
    strategy:
      fail-fast: false
      max-parallel: 2
      matrix:
        module: [models, schedulers, lora, others]
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e [quality,test]
          python -m uv pip install peft@git+https://github.com/huggingface/peft.git
          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
          pip uninstall transformers -y && python -m uv pip install -U transformers@git+https://github.com/huggingface/transformers.git --no-deps

      - name: Environment
        run: |
          python utils/print_env.py

      - name: Extract tests
        id: extract_tests
        run: |
          pattern=$(python utils/extract_tests_from_mixin.py --type ${{ matrix.module }})
          echo "$pattern" > /tmp/test_pattern.txt
          echo "pattern_file=/tmp/test_pattern.txt" >> $GITHUB_OUTPUT

      - name: Run PyTorch CUDA tests
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
          CUBLAS_WORKSPACE_CONFIG: :16:8
        run: |
          pattern=$(cat ${{ steps.extract_tests.outputs.pattern_file }})
          if [ -z "$pattern" ]; then
            python -m pytest -n 1 -sv --max-worker-restart=0 --dist=loadfile -k "not Flax and not Onnx" tests/${{ matrix.module }} \
              --make-reports=tests_torch_cuda_${{ matrix.module }}
          else
            python -m pytest -n 1 -sv --max-worker-restart=0 --dist=loadfile -k "not Flax and not Onnx and $pattern" tests/${{ matrix.module }} \
              --make-reports=tests_torch_cuda_${{ matrix.module }}
          fi

      - name: Failure short reports
        if: ${{ failure() }}
        run: |
          cat reports/tests_torch_cuda_${{ matrix.module }}_stats.txt
          cat reports/tests_torch_cuda_${{ matrix.module }}_failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: torch_cuda_test_reports_${{ matrix.module }}
          path: reports

  run_examples_tests:
    name: Examples PyTorch CUDA tests on Ubuntu
    runs-on:
      group: aws-g4dn-2xlarge

    container:
      image: diffusers/diffusers-pytorch-cuda
      options: --gpus 0 --shm-size "16gb" --ipc host
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: NVIDIA-SMI
        run: |
          nvidia-smi
      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e [quality,test,training]
          # Relocated: this command sat bare at the job-mapping level in the
          # original (invalid workflow YAML); sibling jobs run it in this step.
          pip uninstall transformers -y && python -m uv pip install -U transformers@git+https://github.com/huggingface/transformers.git --no-deps

      - name: Environment
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python utils/print_env.py

      - name: Run example tests on GPU
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install timm
          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v --make-reports=examples_torch_cuda examples/

      - name: Failure short reports
        if: ${{ failure() }}
        run: |
          cat reports/examples_torch_cuda_stats.txt
          cat reports/examples_torch_cuda_failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: examples_test_reports
          path: reports

.github/workflows/push_tests.yml

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -349,7 +349,6 @@ jobs:
349349
container:
350350
image: diffusers/diffusers-pytorch-cuda
351351
options: --gpus 0 --shm-size "16gb" --ipc host
352-
353352
steps:
354353
- name: Checkout diffusers
355354
uses: actions/checkout@v3
@@ -359,7 +358,6 @@ jobs:
359358
- name: NVIDIA-SMI
360359
run: |
361360
nvidia-smi
362-
363361
- name: Install dependencies
364362
run: |
365363
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"

0 commit comments

Comments
 (0)