Commit 39f688a

Merge branch 'main' into modular-diffusers-wan-i2v-flf2v

2 parents: 2ff808d + 3770571


217 files changed: +9560 / −602 lines

.github/workflows/benchmark.yml

Lines changed: 1 addition & 1 deletion
@@ -25,7 +25,7 @@ jobs:
       group: aws-g6e-4xlarge
     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "16gb" --ipc host --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus all
     steps:
       - name: Checkout diffusers
         uses: actions/checkout@v3
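
The recurring change across the workflow files in this commit is this one-line container tweak: the `options` string switches from `--gpus 0` to `--gpus all`, which tells Docker to expose every GPU on the runner to the CI container. Below is a minimal local sketch of the equivalent `docker run` invocation, assuming a host with the NVIDIA container toolkit installed; the image name is taken from the workflow above and the `nvidia-smi` check is purely illustrative.

```sh
# Expose all host GPUs to the container, mirroring the CI `options` line.
docker run --rm --gpus all --shm-size "16gb" --ipc host \
  diffusers/diffusers-pytorch-cuda \
  nvidia-smi   # should list every GPU visible on the host
```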

.github/workflows/mirror_community_pipeline.yml

Lines changed: 2 additions & 2 deletions
@@ -79,14 +79,14 @@ jobs:

       # Check secret is set
       - name: whoami
-        run: huggingface-cli whoami
+        run: hf auth whoami
         env:
           HF_TOKEN: ${{ secrets.HF_TOKEN_MIRROR_COMMUNITY_PIPELINES }}

       # Push to HF! (under subfolder based on checkout ref)
       # https://huggingface.co/datasets/diffusers/community-pipelines-mirror
       - name: Mirror community pipeline to HF
-        run: huggingface-cli upload diffusers/community-pipelines-mirror ./examples/community ${PATH_IN_REPO} --repo-type dataset
+        run: hf upload diffusers/community-pipelines-mirror ./examples/community ${PATH_IN_REPO} --repo-type dataset
         env:
           PATH_IN_REPO: ${{ env.PATH_IN_REPO }}
           HF_TOKEN: ${{ secrets.HF_TOKEN_MIRROR_COMMUNITY_PIPELINES }}
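
The other recurring change is the switch from the legacy `huggingface-cli` entry point to the newer `hf` CLI shipped with `huggingface_hub`; the subcommands used in this workflow map one-to-one. A short sketch of the renamed invocations run outside CI, assuming a valid access token (the token value is a placeholder, and `PATH_IN_REPO` is normally set by the workflow based on the checkout ref):

```sh
export HF_TOKEN=hf_xxxxxxxx              # placeholder; use a real access token
hf auth whoami                           # was: huggingface-cli whoami
hf upload diffusers/community-pipelines-mirror \
  ./examples/community "$PATH_IN_REPO" \
  --repo-type dataset                    # was: huggingface-cli upload ...
```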

.github/workflows/nightly_tests.yml

Lines changed: 7 additions & 7 deletions
@@ -61,7 +61,7 @@ jobs:
       group: aws-g4dn-2xlarge
     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "16gb" --ipc host --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus all
     steps:
       - name: Checkout diffusers
         uses: actions/checkout@v3
@@ -107,7 +107,7 @@ jobs:
       group: aws-g4dn-2xlarge
     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "16gb" --ipc host --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus all
     defaults:
       run:
         shell: bash
@@ -178,7 +178,7 @@

     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --gpus 0 --shm-size "16gb" --ipc host
+      options: --gpus all --shm-size "16gb" --ipc host

     steps:
       - name: Checkout diffusers
@@ -222,7 +222,7 @@ jobs:
       group: aws-g6e-xlarge-plus
     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "16gb" --ipc host --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus all
     steps:
       - name: Checkout diffusers
         uses: actions/checkout@v3
@@ -270,7 +270,7 @@ jobs:
       group: aws-g4dn-2xlarge
     container:
       image: diffusers/diffusers-pytorch-minimum-cuda
-      options: --shm-size "16gb" --ipc host --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus all
     defaults:
       run:
         shell: bash
@@ -344,7 +344,7 @@ jobs:
       group: aws-g6e-xlarge-plus
     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "20gb" --ipc host --gpus 0
+      options: --shm-size "20gb" --ipc host --gpus all
     steps:
       - name: Checkout diffusers
         uses: actions/checkout@v3
@@ -396,7 +396,7 @@ jobs:
       group: aws-g6e-xlarge-plus
     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "20gb" --ipc host --gpus 0
+      options: --shm-size "20gb" --ipc host --gpus all
     steps:
       - name: Checkout diffusers
         uses: actions/checkout@v3

.github/workflows/pr_tests_gpu.yml

Lines changed: 3 additions & 3 deletions
@@ -118,7 +118,7 @@ jobs:
       group: aws-g4dn-2xlarge
     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "16gb" --ipc host --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus all
     steps:
       - name: Checkout diffusers
         uses: actions/checkout@v3
@@ -183,7 +183,7 @@ jobs:
       group: aws-g4dn-2xlarge
     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "16gb" --ipc host --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus all
     defaults:
       run:
         shell: bash
@@ -253,7 +253,7 @@

     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --gpus 0 --shm-size "16gb" --ipc host
+      options: --gpus all --shm-size "16gb" --ipc host
     steps:
       - name: Checkout diffusers
         uses: actions/checkout@v3

.github/workflows/push_tests.yml

Lines changed: 5 additions & 5 deletions
@@ -64,7 +64,7 @@ jobs:
       group: aws-g4dn-2xlarge
     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "16gb" --ipc host --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus all
     steps:
       - name: Checkout diffusers
         uses: actions/checkout@v3
@@ -109,7 +109,7 @@ jobs:
       group: aws-g4dn-2xlarge
     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "16gb" --ipc host --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus all
     defaults:
       run:
         shell: bash
@@ -167,7 +167,7 @@ jobs:

     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --gpus 0 --shm-size "16gb" --ipc host
+      options: --gpus all --shm-size "16gb" --ipc host

     steps:
       - name: Checkout diffusers
@@ -210,7 +210,7 @@ jobs:

     container:
       image: diffusers/diffusers-pytorch-xformers-cuda
-      options: --gpus 0 --shm-size "16gb" --ipc host
+      options: --gpus all --shm-size "16gb" --ipc host

     steps:
       - name: Checkout diffusers
@@ -252,7 +252,7 @@ jobs:

     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --gpus 0 --shm-size "16gb" --ipc host
+      options: --gpus all --shm-size "16gb" --ipc host
     steps:
       - name: Checkout diffusers
         uses: actions/checkout@v3

.github/workflows/release_tests_fast.yml

Lines changed: 6 additions & 6 deletions
@@ -62,7 +62,7 @@ jobs:
       group: aws-g4dn-2xlarge
     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "16gb" --ipc host --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus all
     steps:
       - name: Checkout diffusers
         uses: actions/checkout@v3
@@ -107,7 +107,7 @@ jobs:
       group: aws-g4dn-2xlarge
     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "16gb" --ipc host --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus all
     defaults:
       run:
         shell: bash
@@ -163,7 +163,7 @@ jobs:
       group: aws-g4dn-2xlarge
     container:
       image: diffusers/diffusers-pytorch-minimum-cuda
-      options: --shm-size "16gb" --ipc host --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus all
     defaults:
       run:
         shell: bash
@@ -222,7 +222,7 @@ jobs:

     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --gpus 0 --shm-size "16gb" --ipc host
+      options: --gpus all --shm-size "16gb" --ipc host

     steps:
       - name: Checkout diffusers
@@ -265,7 +265,7 @@ jobs:

     container:
       image: diffusers/diffusers-pytorch-xformers-cuda
-      options: --gpus 0 --shm-size "16gb" --ipc host
+      options: --gpus all --shm-size "16gb" --ipc host

     steps:
       - name: Checkout diffusers
@@ -307,7 +307,7 @@ jobs:

     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --gpus 0 --shm-size "16gb" --ipc host
+      options: --gpus all --shm-size "16gb" --ipc host

     steps:
       - name: Checkout diffusers

.github/workflows/run_tests_from_a_pr.yml

Lines changed: 1 addition & 1 deletion
@@ -30,7 +30,7 @@ jobs:
       group: aws-g4dn-2xlarge
     container:
       image: ${{ github.event.inputs.docker_image }}
-      options: --gpus 0 --privileged --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
+      options: --gpus all --privileged --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/

     steps:
       - name: Validate test files input

.github/workflows/ssh-runner.yml

Lines changed: 1 addition & 1 deletion
@@ -31,7 +31,7 @@ jobs:
       group: "${{ github.event.inputs.runner_type }}"
     container:
       image: ${{ github.event.inputs.docker_image }}
-      options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface/diffusers:/mnt/cache/ --gpus 0 --privileged
+      options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface/diffusers:/mnt/cache/ --gpus all --privileged

     steps:
       - name: Checkout diffusers

benchmarks/README.md

Lines changed: 1 addition & 1 deletion
@@ -31,7 +31,7 @@ pip install -r requirements.txt
 We need to be authenticated to access some of the checkpoints used during benchmarking:
 
 ```sh
-huggingface-cli login
+hf auth login
 ```
 
 We use an L40 GPU with 128GB RAM to run the benchmark CI. As such, the benchmarks are configured to run on NVIDIA GPUs. So, make sure you have access to a similar machine (or modify the benchmarking scripts accordingly).
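
The README now points at the same renamed CLI. As a small sketch, authentication can be done either interactively or, as the CI jobs above do, by exporting a token through the environment (the token value below is a placeholder):

```sh
hf auth login                # interactive: prompts for an access token
# Non-interactive alternative (e.g. in CI): export the token instead.
export HF_TOKEN=hf_xxxxxxxx  # placeholder; use a real access token
hf auth whoami               # verify the credentials are picked up
```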

docs/source/en/_toctree.yml

Lines changed: 7 additions & 1 deletion
@@ -179,7 +179,7 @@
   isExpanded: false
   sections:
     - local: quantization/overview
-      title: Getting Started
+      title: Getting started
     - local: quantization/bitsandbytes
       title: bitsandbytes
     - local: quantization/gguf
@@ -366,6 +366,8 @@
       title: PixArtTransformer2DModel
     - local: api/models/prior_transformer
       title: PriorTransformer
+    - local: api/models/qwenimage_transformer2d
+      title: QwenImageTransformer2DModel
     - local: api/models/sana_transformer2d
       title: SanaTransformer2DModel
     - local: api/models/sd3_transformer2d
@@ -418,6 +420,8 @@
       title: AutoencoderKLMagvit
     - local: api/models/autoencoderkl_mochi
       title: AutoencoderKLMochi
+    - local: api/models/autoencoderkl_qwenimage
+      title: AutoencoderKLQwenImage
     - local: api/models/autoencoder_kl_wan
       title: AutoencoderKLWan
     - local: api/models/consistency_decoder_vae
@@ -554,6 +558,8 @@
       title: PixArt-α
     - local: api/pipelines/pixart_sigma
       title: PixArt-Σ
+    - local: api/pipelines/qwenimage
+      title: QwenImage
     - local: api/pipelines/sana
       title: Sana
     - local: api/pipelines/sana_sprint
