Skip to content

Commit b8aa2cc

Browse files
authored
Merge branch 'main' into main
2 parents 1cb6191 + 01780c3 commit b8aa2cc

File tree

345 files changed

+48758
-2954
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

345 files changed

+48758
-2954
lines changed

.github/workflows/nightly_tests.yml

Lines changed: 66 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -238,12 +238,13 @@ jobs:
238238
239239
run_flax_tpu_tests:
240240
name: Nightly Flax TPU Tests
241-
runs-on: docker-tpu
241+
runs-on:
242+
group: gcp-ct5lp-hightpu-8t
242243
if: github.event_name == 'schedule'
243244

244245
container:
245246
image: diffusers/diffusers-flax-tpu
246-
options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/ --privileged
247+
options: --shm-size "16gb" --ipc host --privileged ${{ vars.V5_LITEPOD_8_ENV}} -v /mnt/hf_cache:/mnt/hf_cache
247248
defaults:
248249
run:
249250
shell: bash
@@ -347,6 +348,68 @@ jobs:
347348
pip install slack_sdk tabulate
348349
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
349350
351+
run_nightly_quantization_tests:
352+
name: Torch quantization nightly tests
353+
strategy:
354+
fail-fast: false
355+
max-parallel: 2
356+
matrix:
357+
config:
358+
- backend: "bitsandbytes"
359+
test_location: "bnb"
360+
- backend: "gguf"
361+
test_location: "gguf"
362+
- backend: "torchao"
363+
test_location: "torchao"
364+
runs-on:
365+
group: aws-g6e-xlarge-plus
366+
container:
367+
image: diffusers/diffusers-pytorch-cuda
368+
options: --shm-size "20gb" --ipc host --gpus 0
369+
steps:
370+
- name: Checkout diffusers
371+
uses: actions/checkout@v3
372+
with:
373+
fetch-depth: 2
374+
- name: NVIDIA-SMI
375+
run: nvidia-smi
376+
- name: Install dependencies
377+
run: |
378+
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
379+
python -m uv pip install -e [quality,test]
380+
python -m uv pip install -U ${{ matrix.config.backend }}
381+
python -m uv pip install pytest-reportlog
382+
- name: Environment
383+
run: |
384+
python utils/print_env.py
385+
- name: ${{ matrix.config.backend }} quantization tests on GPU
386+
env:
387+
HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
388+
# https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
389+
CUBLAS_WORKSPACE_CONFIG: :16:8
390+
BIG_GPU_MEMORY: 40
391+
run: |
392+
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
393+
--make-reports=tests_${{ matrix.config.backend }}_torch_cuda \
394+
--report-log=tests_${{ matrix.config.backend }}_torch_cuda.log \
395+
tests/quantization/${{ matrix.config.test_location }}
396+
- name: Failure short reports
397+
if: ${{ failure() }}
398+
run: |
399+
cat reports/tests_${{ matrix.config.backend }}_torch_cuda_stats.txt
400+
cat reports/tests_${{ matrix.config.backend }}_torch_cuda_failures_short.txt
401+
- name: Test suite reports artifacts
402+
if: ${{ always() }}
403+
uses: actions/upload-artifact@v4
404+
with:
405+
name: torch_cuda_${{ matrix.config.backend }}_reports
406+
path: reports
407+
- name: Generate Report and Notify Channel
408+
if: always()
409+
run: |
410+
pip install slack_sdk tabulate
411+
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
412+
350413
# M1 runner currently not well supported
351414
# TODO: (Dhruv) add these back when we setup better testing for Apple Silicon
352415
# run_nightly_tests_apple_m1:
@@ -461,4 +524,4 @@ jobs:
461524
# if: always()
462525
# run: |
463526
# pip install slack_sdk tabulate
464-
# python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
527+
# python utils/log_reports.py >> $GITHUB_STEP_SUMMARY

.github/workflows/pr_test_peft_backend.yml

Lines changed: 0 additions & 134 deletions
This file was deleted.

.github/workflows/pr_tests.yml

Lines changed: 64 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -234,3 +234,67 @@ jobs:
234234
with:
235235
name: pr_${{ matrix.config.report }}_test_reports
236236
path: reports
237+
238+
run_lora_tests:
239+
needs: [check_code_quality, check_repository_consistency]
240+
strategy:
241+
fail-fast: false
242+
243+
name: LoRA tests with PEFT main
244+
245+
runs-on:
246+
group: aws-general-8-plus
247+
248+
container:
249+
image: diffusers/diffusers-pytorch-cpu
250+
options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/
251+
252+
defaults:
253+
run:
254+
shell: bash
255+
256+
steps:
257+
- name: Checkout diffusers
258+
uses: actions/checkout@v3
259+
with:
260+
fetch-depth: 2
261+
262+
- name: Install dependencies
263+
run: |
264+
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
265+
python -m uv pip install -e [quality,test]
266+
# TODO (sayakpaul, DN6): revisit `--no-deps`
267+
python -m pip install -U peft@git+https://github.com/huggingface/peft.git --no-deps
268+
python -m uv pip install -U transformers@git+https://github.com/huggingface/transformers.git --no-deps
269+
pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git --no-deps
270+
271+
- name: Environment
272+
run: |
273+
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
274+
python utils/print_env.py
275+
276+
- name: Run fast PyTorch LoRA tests with PEFT
277+
run: |
278+
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
279+
python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
280+
-s -v \
281+
--make-reports=tests_peft_main \
282+
tests/lora/
283+
python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
284+
-s -v \
285+
--make-reports=tests_models_lora_peft_main \
286+
tests/models/ -k "lora"
287+
288+
- name: Failure short reports
289+
if: ${{ failure() }}
290+
run: |
291+
cat reports/tests_lora_failures_short.txt
292+
cat reports/tests_models_lora_failures_short.txt
293+
294+
- name: Test suite reports artifacts
295+
if: ${{ always() }}
296+
uses: actions/upload-artifact@v4
297+
with:
298+
name: pr_main_test_reports
299+
path: reports
300+

.github/workflows/push_tests.yml

Lines changed: 3 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -161,10 +161,11 @@ jobs:
161161

162162
flax_tpu_tests:
163163
name: Flax TPU Tests
164-
runs-on: docker-tpu
164+
runs-on:
165+
group: gcp-ct5lp-hightpu-8t
165166
container:
166167
image: diffusers/diffusers-flax-tpu
167-
options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ --privileged
168+
options: --shm-size "16gb" --ipc host --privileged ${{ vars.V5_LITEPOD_8_ENV}} -v /mnt/hf_cache:/mnt/hf_cache
168169
defaults:
169170
run:
170171
shell: bash

.github/workflows/push_tests_mps.yml

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -46,7 +46,7 @@ jobs:
4646
shell: arch -arch arm64 bash {0}
4747
run: |
4848
${CONDA_RUN} python -m pip install --upgrade pip uv
49-
${CONDA_RUN} python -m uv pip install -e [quality,test]
49+
${CONDA_RUN} python -m uv pip install -e ".[quality,test]"
5050
${CONDA_RUN} python -m uv pip install torch torchvision torchaudio
5151
${CONDA_RUN} python -m uv pip install accelerate@git+https://github.com/huggingface/accelerate.git
5252
${CONDA_RUN} python -m uv pip install transformers --upgrade

.github/workflows/pypi_publish.yaml

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -68,7 +68,7 @@ jobs:
6868
- name: Test installing diffusers and importing
6969
run: |
7070
pip install diffusers && pip uninstall diffusers -y
71-
pip install -i https://testpypi.python.org/pypi diffusers
71+
pip install -i https://test.pypi.org/simple/ diffusers
7272
python -c "from diffusers import __version__; print(__version__)"
7373
python -c "from diffusers import DiffusionPipeline; pipe = DiffusionPipeline.from_pretrained('fusing/unet-ldm-dummy-update'); pipe()"
7474
python -c "from diffusers import DiffusionPipeline; pipe = DiffusionPipeline.from_pretrained('hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=None); pipe('ah suh du')"

README.md

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -112,9 +112,9 @@ Check out the [Quickstart](https://huggingface.co/docs/diffusers/quicktour) to l
112112
| **Documentation** | **What can I learn?** |
113113
|---------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
114114
| [Tutorial](https://huggingface.co/docs/diffusers/tutorials/tutorial_overview) | A basic crash course for learning how to use the library's most important features like using models and schedulers to build your own diffusion system, and training your own diffusion model. |
115-
| [Loading](https://huggingface.co/docs/diffusers/using-diffusers/loading_overview) | Guides for how to load and configure all the components (pipelines, models, and schedulers) of the library, as well as how to use different schedulers. |
116-
| [Pipelines for inference](https://huggingface.co/docs/diffusers/using-diffusers/pipeline_overview) | Guides for how to use pipelines for different inference tasks, batched generation, controlling generated outputs and randomness, and how to contribute a pipeline to the library. |
117-
| [Optimization](https://huggingface.co/docs/diffusers/optimization/opt_overview) | Guides for how to optimize your diffusion model to run faster and consume less memory. |
115+
| [Loading](https://huggingface.co/docs/diffusers/using-diffusers/loading) | Guides for how to load and configure all the components (pipelines, models, and schedulers) of the library, as well as how to use different schedulers. |
116+
| [Pipelines for inference](https://huggingface.co/docs/diffusers/using-diffusers/overview_techniques) | Guides for how to use pipelines for different inference tasks, batched generation, controlling generated outputs and randomness, and how to contribute a pipeline to the library. |
117+
| [Optimization](https://huggingface.co/docs/diffusers/optimization/fp16) | Guides for how to optimize your diffusion model to run faster and consume less memory. |
118118
| [Training](https://huggingface.co/docs/diffusers/training/overview) | Guides for how to train a diffusion model for different tasks with different training techniques. |
119119
## Contribution
120120

0 commit comments

Comments (0)