
Commit 77fa954

Merge branch 'main' into modular-standard-repo
2 parents: 25f0143 + cbecc33

583 files changed (+34314 / -9750 lines)


.github/workflows/benchmark.yml

Lines changed: 1 addition & 1 deletion
@@ -25,7 +25,7 @@ jobs:
      group: aws-g6e-4xlarge
    container:
      image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "16gb" --ipc host --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus all
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
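
Note on the recurring change: every GPU workflow in this commit swaps `--gpus 0` for `--gpus all` in the container options, so the test container is started with access to every GPU on the runner (`--gpus all` is unambiguous, whereas a bare `0` depends on how Docker parses numeric values for the flag). A hedged sketch of the two invocations outside CI, assuming the NVIDIA Container Toolkit is set up and the `diffusers/diffusers-pytorch-cuda` image can be pulled locally:

  # Hedged sketch, not part of the commit: compare `--gpus` values outside CI.
  # Assumes the NVIDIA Container Toolkit injects nvidia-smi into the container.

  # New behaviour in the workflows: expose every host GPU to the container.
  docker run --rm --gpus all --shm-size "16gb" --ipc host \
    diffusers/diffusers-pytorch-cuda nvidia-smi -L

  # Explicit device syntax, if a single GPU is really intended.
  docker run --rm --gpus '"device=0"' --shm-size "16gb" --ipc host \
    diffusers/diffusers-pytorch-cuda nvidia-smi -L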

.github/workflows/mirror_community_pipeline.yml

Lines changed: 2 additions & 2 deletions
@@ -79,14 +79,14 @@ jobs:

      # Check secret is set
      - name: whoami
-        run: huggingface-cli whoami
+        run: hf auth whoami
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN_MIRROR_COMMUNITY_PIPELINES }}

      # Push to HF! (under subfolder based on checkout ref)
      # https://huggingface.co/datasets/diffusers/community-pipelines-mirror
      - name: Mirror community pipeline to HF
-        run: huggingface-cli upload diffusers/community-pipelines-mirror ./examples/community ${PATH_IN_REPO} --repo-type dataset
+        run: hf upload diffusers/community-pipelines-mirror ./examples/community ${PATH_IN_REPO} --repo-type dataset
        env:
          PATH_IN_REPO: ${{ env.PATH_IN_REPO }}
          HF_TOKEN: ${{ secrets.HF_TOKEN_MIRROR_COMMUNITY_PIPELINES }}
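
Both steps here move from the legacy `huggingface-cli` entry point to the newer `hf` CLI that ships with recent `huggingface_hub` releases. A rough local equivalent of the two commands, assuming an up-to-date `huggingface_hub` and a valid token; the `PATH_IN_REPO` value is derived from the checkout ref in CI and is only a placeholder below:

  # Hedged local sketch of the two workflow steps above.
  pip install -U huggingface_hub      # recent versions provide the `hf` command
  export HF_TOKEN=hf_xxxxx            # placeholder; CI injects a repository secret
  hf auth whoami                      # replaces `huggingface-cli whoami`
  PATH_IN_REPO=main                   # placeholder; set from the checkout ref in CI
  hf upload diffusers/community-pipelines-mirror ./examples/community "$PATH_IN_REPO" --repo-type dataset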

.github/workflows/nightly_tests.yml

Lines changed: 8 additions & 8 deletions
@@ -61,7 +61,7 @@ jobs:
      group: aws-g4dn-2xlarge
    container:
      image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "16gb" --ipc host --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus all
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
@@ -107,7 +107,7 @@ jobs:
      group: aws-g4dn-2xlarge
    container:
      image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "16gb" --ipc host --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus all
    defaults:
      run:
        shell: bash
@@ -178,7 +178,7 @@ jobs:

    container:
      image: diffusers/diffusers-pytorch-cuda
-      options: --gpus 0 --shm-size "16gb" --ipc host
+      options: --gpus all --shm-size "16gb" --ipc host

    steps:
      - name: Checkout diffusers
@@ -222,7 +222,7 @@ jobs:
      group: aws-g6e-xlarge-plus
    container:
      image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "16gb" --ipc host --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus all
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
@@ -270,7 +270,7 @@ jobs:
      group: aws-g4dn-2xlarge
    container:
      image: diffusers/diffusers-pytorch-minimum-cuda
-      options: --shm-size "16gb" --ipc host --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus all
    defaults:
      run:
        shell: bash
@@ -333,7 +333,7 @@ jobs:
            additional_deps: ["peft"]
          - backend: "gguf"
            test_location: "gguf"
-            additional_deps: ["peft"]
+            additional_deps: ["peft", "kernels"]
          - backend: "torchao"
            test_location: "torchao"
            additional_deps: []
@@ -344,7 +344,7 @@ jobs:
      group: aws-g6e-xlarge-plus
    container:
      image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "20gb" --ipc host --gpus 0
+      options: --shm-size "20gb" --ipc host --gpus all
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
@@ -396,7 +396,7 @@ jobs:
      group: aws-g6e-xlarge-plus
    container:
      image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "20gb" --ipc host --gpus 0
+      options: --shm-size "20gb" --ipc host --gpus all
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
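
Besides the `--gpus all` switch, the gguf quantization job now lists `kernels` next to `peft` in `additional_deps`. The install step that consumes this matrix field sits outside the hunks shown here, so the following is only a hedged approximation of the resulting test environment:

  # Hedged sketch: approximate the gguf nightly job's extra dependencies locally.
  # The actual install command lives elsewhere in nightly_tests.yml and is not shown above.
  pip install -e ".[test]"
  pip install -U gguf peft kernels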

.github/workflows/pr_flax_dependency_test.yml

Lines changed: 0 additions & 38 deletions
This file was deleted.
New workflow file (filename not shown above)

Lines changed: 141 additions & 0 deletions
@@ -0,0 +1,141 @@
name: Fast PR tests for Modular

on:
  pull_request:
    branches: [main]
    paths:
      - "src/diffusers/modular_pipelines/**.py"
      - "src/diffusers/models/modeling_utils.py"
      - "src/diffusers/models/model_loading_utils.py"
      - "src/diffusers/pipelines/pipeline_utils.py"
      - "src/diffusers/pipeline_loading_utils.py"
      - "src/diffusers/loaders/lora_base.py"
      - "src/diffusers/loaders/lora_pipeline.py"
      - "src/diffusers/loaders/peft.py"
      - "tests/modular_pipelines/**.py"
      - ".github/**.yml"
      - "utils/**.py"
      - "setup.py"
  push:
    branches:
      - ci-*

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

env:
  DIFFUSERS_IS_CI: yes
  HF_HUB_ENABLE_HF_TRANSFER: 1
  OMP_NUM_THREADS: 4
  MKL_NUM_THREADS: 4
  PYTEST_TIMEOUT: 60

jobs:
  check_code_quality:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: "3.10"
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install .[quality]
      - name: Check quality
        run: make quality
      - name: Check if failure
        if: ${{ failure() }}
        run: |
          echo "Quality check failed. Please ensure the right dependency versions are installed with 'pip install -e .[quality]' and run 'make style && make quality'" >> $GITHUB_STEP_SUMMARY

  check_repository_consistency:
    needs: check_code_quality
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: "3.10"
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install .[quality]
      - name: Check repo consistency
        run: |
          python utils/check_copies.py
          python utils/check_dummies.py
          python utils/check_support_list.py
          make deps_table_check_updated
      - name: Check if failure
        if: ${{ failure() }}
        run: |
          echo "Repo consistency check failed. Please ensure the right dependency versions are installed with 'pip install -e .[quality]' and run 'make fix-copies'" >> $GITHUB_STEP_SUMMARY

  run_fast_tests:
    needs: [check_code_quality, check_repository_consistency]
    strategy:
      fail-fast: false
      matrix:
        config:
          - name: Fast PyTorch Modular Pipeline CPU tests
            framework: pytorch_pipelines
            runner: aws-highmemory-32-plus
            image: diffusers/diffusers-pytorch-cpu
            report: torch_cpu_modular_pipelines

    name: ${{ matrix.config.name }}

    runs-on:
      group: ${{ matrix.config.runner }}

    container:
      image: ${{ matrix.config.image }}
      options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/

    defaults:
      run:
        shell: bash

    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e [quality,test]
          pip uninstall transformers -y && python -m uv pip install -U transformers@git+https://github.com/huggingface/transformers.git --no-deps
          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git --no-deps

      - name: Environment
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python utils/print_env.py

      - name: Run fast PyTorch Pipeline CPU tests
        if: ${{ matrix.config.framework == 'pytorch_pipelines' }}
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile \
            -s -v -k "not Flax and not Onnx" \
            --make-reports=tests_${{ matrix.config.report }} \
            tests/modular_pipelines

      - name: Failure short reports
        if: ${{ failure() }}
        run: cat reports/tests_${{ matrix.config.report }}_failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: pr_${{ matrix.config.framework }}_${{ matrix.config.report }}_test_reports
          path: reports
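
The final job shards the modular pipeline tests across eight pytest-xdist workers and filters out Flax and ONNX tests. A hedged sketch of running the same selection locally, assuming a development checkout of diffusers with the test extras installed (the `--make-reports` option comes from diffusers' own pytest configuration, and the report name below is the single value defined in the matrix):

  # Hedged local sketch of the "Run fast PyTorch Pipeline CPU tests" step above.
  pip install -e ".[quality,test]"
  python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile \
    -s -v -k "not Flax and not Onnx" \
    --make-reports=tests_torch_cpu_modular_pipelines \
    tests/modular_pipelines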

.github/workflows/pr_tests_gpu.yml

Lines changed: 3 additions & 3 deletions
@@ -118,7 +118,7 @@ jobs:
      group: aws-g4dn-2xlarge
    container:
      image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "16gb" --ipc host --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus all
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
@@ -183,7 +183,7 @@ jobs:
      group: aws-g4dn-2xlarge
    container:
      image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "16gb" --ipc host --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus all
    defaults:
      run:
        shell: bash
@@ -253,7 +253,7 @@ jobs:

    container:
      image: diffusers/diffusers-pytorch-cuda
-      options: --gpus 0 --shm-size "16gb" --ipc host
+      options: --gpus all --shm-size "16gb" --ipc host
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
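
The same `--gpus 0` to `--gpus all` substitution is applied in push_tests.yml below. One way to confirm the effect from inside such a container is to ask PyTorch how many devices it can see (a hedged sanity check, assuming the image bundles a CUDA-enabled PyTorch build):

  # Hedged check, not part of the commit: count the GPUs visible inside the container.
  docker run --rm --gpus all --shm-size "16gb" --ipc host \
    diffusers/diffusers-pytorch-cuda \
    python -c "import torch; print(torch.cuda.device_count())"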

.github/workflows/push_tests.yml

Lines changed: 5 additions & 5 deletions
@@ -64,7 +64,7 @@ jobs:
      group: aws-g4dn-2xlarge
    container:
      image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "16gb" --ipc host --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus all
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
@@ -109,7 +109,7 @@ jobs:
      group: aws-g4dn-2xlarge
    container:
      image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "16gb" --ipc host --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus all
    defaults:
      run:
        shell: bash
@@ -167,7 +167,7 @@ jobs:

    container:
      image: diffusers/diffusers-pytorch-cuda
-      options: --gpus 0 --shm-size "16gb" --ipc host
+      options: --gpus all --shm-size "16gb" --ipc host

    steps:
      - name: Checkout diffusers
@@ -210,7 +210,7 @@ jobs:

    container:
      image: diffusers/diffusers-pytorch-xformers-cuda
-      options: --gpus 0 --shm-size "16gb" --ipc host
+      options: --gpus all --shm-size "16gb" --ipc host

    steps:
      - name: Checkout diffusers
@@ -252,7 +252,7 @@ jobs:

    container:
      image: diffusers/diffusers-pytorch-cuda
-      options: --gpus 0 --shm-size "16gb" --ipc host
+      options: --gpus all --shm-size "16gb" --ipc host
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
