Commit 6f912ab

Merge branch 'main' into modular-test
2 parents 1f0570d + 7b10e4a

File tree

317 files changed (+22431 / -2247 lines)


.github/workflows/benchmark.yml

Lines changed: 1 addition & 1 deletion
@@ -25,7 +25,7 @@ jobs:
       group: aws-g6e-4xlarge
     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "16gb" --ipc host --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus all
     steps:
       - name: Checkout diffusers
         uses: actions/checkout@v3
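
The hunk above is the pattern repeated across every workflow touched by this commit: the container GPU request goes from `--gpus 0` to `--gpus all`. As a hedged local sketch (not part of the commit), the equivalent `docker run` invocation would look like this; Docker reads a bare number after `--gpus` as a device count, so `all` is what actually exposes the runner's GPUs to the container. The `nvidia-smi` call is only an illustrative check and assumes the tool is present in the image.

```sh
# Sketch only: run the CI image locally with the same container options,
# using --gpus all so every GPU on the host is visible inside the container.
docker run --rm --shm-size "16gb" --ipc host --gpus all \
  diffusers/diffusers-pytorch-cuda nvidia-smi
```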

.github/workflows/mirror_community_pipeline.yml

Lines changed: 2 additions & 2 deletions
@@ -79,14 +79,14 @@ jobs:
 
       # Check secret is set
       - name: whoami
-        run: huggingface-cli whoami
+        run: hf auth whoami
         env:
           HF_TOKEN: ${{ secrets.HF_TOKEN_MIRROR_COMMUNITY_PIPELINES }}
 
       # Push to HF! (under subfolder based on checkout ref)
       # https://huggingface.co/datasets/diffusers/community-pipelines-mirror
       - name: Mirror community pipeline to HF
-        run: huggingface-cli upload diffusers/community-pipelines-mirror ./examples/community ${PATH_IN_REPO} --repo-type dataset
+        run: hf upload diffusers/community-pipelines-mirror ./examples/community ${PATH_IN_REPO} --repo-type dataset
         env:
           PATH_IN_REPO: ${{ env.PATH_IN_REPO }}
           HF_TOKEN: ${{ secrets.HF_TOKEN_MIRROR_COMMUNITY_PIPELINES }}
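
The two commands being migrated here map one-to-one from the old `huggingface-cli` entry point to the newer `hf` CLI. A minimal local sketch (not part of the commit), with placeholders standing in for the secrets and env vars the workflow injects:

```sh
# Sketch only; HF_TOKEN and PATH_IN_REPO are placeholders for the values the workflow sets.
export HF_TOKEN="hf_xxx"        # placeholder access token
export PATH_IN_REPO="main"      # placeholder subfolder in the dataset repo

hf auth whoami
hf upload diffusers/community-pipelines-mirror ./examples/community "${PATH_IN_REPO}" --repo-type dataset
```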

.github/workflows/nightly_tests.yml

Lines changed: 8 additions & 8 deletions
@@ -61,7 +61,7 @@ jobs:
       group: aws-g4dn-2xlarge
     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "16gb" --ipc host --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus all
     steps:
       - name: Checkout diffusers
         uses: actions/checkout@v3
@@ -107,7 +107,7 @@ jobs:
       group: aws-g4dn-2xlarge
     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "16gb" --ipc host --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus all
     defaults:
       run:
         shell: bash
@@ -178,7 +178,7 @@ jobs:
 
     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --gpus 0 --shm-size "16gb" --ipc host
+      options: --gpus all --shm-size "16gb" --ipc host
 
     steps:
       - name: Checkout diffusers
@@ -222,7 +222,7 @@ jobs:
       group: aws-g6e-xlarge-plus
     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "16gb" --ipc host --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus all
     steps:
       - name: Checkout diffusers
         uses: actions/checkout@v3
@@ -270,7 +270,7 @@ jobs:
       group: aws-g4dn-2xlarge
     container:
       image: diffusers/diffusers-pytorch-minimum-cuda
-      options: --shm-size "16gb" --ipc host --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus all
     defaults:
       run:
         shell: bash
@@ -333,7 +333,7 @@ jobs:
           additional_deps: ["peft"]
         - backend: "gguf"
           test_location: "gguf"
-          additional_deps: ["peft"]
+          additional_deps: ["peft", "kernels"]
         - backend: "torchao"
           test_location: "torchao"
           additional_deps: []
@@ -344,7 +344,7 @@ jobs:
       group: aws-g6e-xlarge-plus
     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "20gb" --ipc host --gpus 0
+      options: --shm-size "20gb" --ipc host --gpus all
     steps:
       - name: Checkout diffusers
         uses: actions/checkout@v3
@@ -396,7 +396,7 @@ jobs:
       group: aws-g6e-xlarge-plus
     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "20gb" --ipc host --gpus 0
+      options: --shm-size "20gb" --ipc host --gpus all
     steps:
       - name: Checkout diffusers
         uses: actions/checkout@v3
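
Besides the `--gpus` flips, the only functional change in this file adds `kernels` to the extra dependencies of the gguf quantization matrix entry. How the workflow consumes `additional_deps` is not shown in these hunks; as a rough sketch, and assuming the job simply pip-installs the listed packages, the entry amounts to:

```sh
# Sketch only, assuming additional_deps is pip-installed by the quantization job.
pip install peft kernels
```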

.github/workflows/pr_tests_gpu.yml

Lines changed: 4 additions & 3 deletions
@@ -13,6 +13,7 @@ on:
       - "src/diffusers/loaders/peft.py"
       - "tests/pipelines/test_pipelines_common.py"
       - "tests/models/test_modeling_common.py"
+      - "examples/**/*.py"
   workflow_dispatch:
 
 concurrency:
@@ -117,7 +118,7 @@ jobs:
       group: aws-g4dn-2xlarge
     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "16gb" --ipc host --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus all
     steps:
       - name: Checkout diffusers
         uses: actions/checkout@v3
@@ -182,7 +183,7 @@ jobs:
       group: aws-g4dn-2xlarge
     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "16gb" --ipc host --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus all
     defaults:
       run:
         shell: bash
@@ -252,7 +253,7 @@ jobs:
 
     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --gpus 0 --shm-size "16gb" --ipc host
+      options: --gpus all --shm-size "16gb" --ipc host
     steps:
       - name: Checkout diffusers
         uses: actions/checkout@v3

.github/workflows/push_tests.yml

Lines changed: 5 additions & 5 deletions
@@ -64,7 +64,7 @@ jobs:
       group: aws-g4dn-2xlarge
     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "16gb" --ipc host --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus all
     steps:
       - name: Checkout diffusers
         uses: actions/checkout@v3
@@ -109,7 +109,7 @@ jobs:
       group: aws-g4dn-2xlarge
     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "16gb" --ipc host --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus all
     defaults:
       run:
         shell: bash
@@ -167,7 +167,7 @@ jobs:
 
     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --gpus 0 --shm-size "16gb" --ipc host
+      options: --gpus all --shm-size "16gb" --ipc host
 
     steps:
       - name: Checkout diffusers
@@ -210,7 +210,7 @@ jobs:
 
     container:
       image: diffusers/diffusers-pytorch-xformers-cuda
-      options: --gpus 0 --shm-size "16gb" --ipc host
+      options: --gpus all --shm-size "16gb" --ipc host
 
     steps:
       - name: Checkout diffusers
@@ -252,7 +252,7 @@ jobs:
 
     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --gpus 0 --shm-size "16gb" --ipc host
+      options: --gpus all --shm-size "16gb" --ipc host
     steps:
       - name: Checkout diffusers
         uses: actions/checkout@v3

.github/workflows/release_tests_fast.yml

Lines changed: 6 additions & 6 deletions
@@ -62,7 +62,7 @@ jobs:
       group: aws-g4dn-2xlarge
     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "16gb" --ipc host --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus all
     steps:
       - name: Checkout diffusers
         uses: actions/checkout@v3
@@ -107,7 +107,7 @@ jobs:
       group: aws-g4dn-2xlarge
     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --shm-size "16gb" --ipc host --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus all
     defaults:
       run:
         shell: bash
@@ -163,7 +163,7 @@ jobs:
       group: aws-g4dn-2xlarge
     container:
       image: diffusers/diffusers-pytorch-minimum-cuda
-      options: --shm-size "16gb" --ipc host --gpus 0
+      options: --shm-size "16gb" --ipc host --gpus all
     defaults:
       run:
         shell: bash
@@ -222,7 +222,7 @@ jobs:
 
     container:
      image: diffusers/diffusers-pytorch-cuda
-      options: --gpus 0 --shm-size "16gb" --ipc host
+      options: --gpus all --shm-size "16gb" --ipc host
 
     steps:
       - name: Checkout diffusers
@@ -265,7 +265,7 @@ jobs:
 
     container:
       image: diffusers/diffusers-pytorch-xformers-cuda
-      options: --gpus 0 --shm-size "16gb" --ipc host
+      options: --gpus all --shm-size "16gb" --ipc host
 
     steps:
       - name: Checkout diffusers
@@ -307,7 +307,7 @@ jobs:
 
     container:
       image: diffusers/diffusers-pytorch-cuda
-      options: --gpus 0 --shm-size "16gb" --ipc host
+      options: --gpus all --shm-size "16gb" --ipc host
 
     steps:
       - name: Checkout diffusers

.github/workflows/run_tests_from_a_pr.yml

Lines changed: 1 addition & 1 deletion
@@ -30,7 +30,7 @@ jobs:
       group: aws-g4dn-2xlarge
     container:
       image: ${{ github.event.inputs.docker_image }}
-      options: --gpus 0 --privileged --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
+      options: --gpus all --privileged --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
 
     steps:
       - name: Validate test files input

.github/workflows/ssh-runner.yml

Lines changed: 1 addition & 1 deletion
@@ -31,7 +31,7 @@ jobs:
       group: "${{ github.event.inputs.runner_type }}"
     container:
       image: ${{ github.event.inputs.docker_image }}
-      options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface/diffusers:/mnt/cache/ --gpus 0 --privileged
+      options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface/diffusers:/mnt/cache/ --gpus all --privileged
 
     steps:
       - name: Checkout diffusers

benchmarks/README.md

Lines changed: 1 addition & 1 deletion
@@ -31,7 +31,7 @@ pip install -r requirements.txt
 We need to be authenticated to access some of the checkpoints used during benchmarking:
 
 ```sh
-huggingface-cli login
+hf auth login
 ```
 
 We use an L40 GPU with 128GB RAM to run the benchmark CI. As such, the benchmarks are configured to run on NVIDIA GPUs. So, make sure you have access to a similar machine (or modify the benchmarking scripts accordingly).
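
The README change above switches the interactive login command to the newer `hf` CLI. On a non-interactive machine (such as the benchmark CI itself), a token-based login is the usual alternative; a hedged sketch, assuming the `--token` flag carried over from `huggingface-cli login` and with `HF_TOKEN` as a placeholder:

```sh
# Sketch only: pass a pre-created access token instead of logging in interactively.
hf auth login --token "$HF_TOKEN"
```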
