
Commit 30e6db6

Merge branch 'main' into flux_guidancecontrol_inpaint
2 parents: c3bc32e + c002724

35 files changed: +7,346 -143 lines

.github/workflows/nightly_tests.yml

Lines changed: 4 additions & 3 deletions
@@ -238,12 +238,13 @@ jobs:
 
   run_flax_tpu_tests:
     name: Nightly Flax TPU Tests
-    runs-on: docker-tpu
+    runs-on:
+      group: gcp-ct5lp-hightpu-8t
     if: github.event_name == 'schedule'
 
     container:
       image: diffusers/diffusers-flax-tpu
-      options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/ --privileged
+      options: --shm-size "16gb" --ipc host --privileged ${{ vars.V5_LITEPOD_8_ENV}} -v /mnt/hf_cache:/mnt/hf_cache
     defaults:
       run:
         shell: bash
@@ -519,4 +520,4 @@ jobs:
 #          if: always()
 #          run: |
 #            pip install slack_sdk tabulate
-#            python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
+#            python utils/log_reports.py >> $GITHUB_STEP_SUMMARY

.github/workflows/pr_test_peft_backend.yml

Lines changed: 0 additions & 134 deletions
This file was deleted.

.github/workflows/pr_tests.yml

Lines changed: 64 additions & 0 deletions
@@ -234,3 +234,67 @@ jobs:
         with:
           name: pr_${{ matrix.config.report }}_test_reports
           path: reports
+
+  run_lora_tests:
+    needs: [check_code_quality, check_repository_consistency]
+    strategy:
+      fail-fast: false
+
+    name: LoRA tests with PEFT main
+
+    runs-on:
+      group: aws-general-8-plus
+
+    container:
+      image: diffusers/diffusers-pytorch-cpu
+      options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/
+
+    defaults:
+      run:
+        shell: bash
+
+    steps:
+      - name: Checkout diffusers
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 2
+
+      - name: Install dependencies
+        run: |
+          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
+          python -m uv pip install -e [quality,test]
+          # TODO (sayakpaul, DN6): revisit `--no-deps`
+          python -m pip install -U peft@git+https://github.com/huggingface/peft.git --no-deps
+          python -m uv pip install -U transformers@git+https://github.com/huggingface/transformers.git --no-deps
+          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git --no-deps
+
+      - name: Environment
+        run: |
+          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
+          python utils/print_env.py
+
+      - name: Run fast PyTorch LoRA tests with PEFT
+        run: |
+          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
+          python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
+            -s -v \
+            --make-reports=tests_peft_main \
+            tests/lora/
+          python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
+            -s -v \
+            --make-reports=tests_models_lora_peft_main \
+            tests/models/ -k "lora"
+
+      - name: Failure short reports
+        if: ${{ failure() }}
+        run: |
+          cat reports/tests_lora_failures_short.txt
+          cat reports/tests_models_lora_failures_short.txt
+
+      - name: Test suite reports artifacts
+        if: ${{ always() }}
+        uses: actions/upload-artifact@v4
+        with:
+          name: pr_main_test_reports
+          path: reports
+
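For context, this new job exercises diffusers' PEFT backend against `peft`, `transformers`, and `accelerate` installed from `main`. Below is a minimal, hypothetical sketch of the kind of check those LoRA suites run: attach a LoRA adapter to a tiny UNet and confirm LoRA parameters were injected. The tiny config and the assertion are illustrative only, not one of the repository's actual tests.

```python
# Illustrative only: a tiny UNet config (similar to diffusers' fast-test dummies)
# with a LoRA adapter attached through the PEFT backend.
from diffusers import UNet2DConditionModel
from peft import LoraConfig

unet = UNet2DConditionModel(
    block_out_channels=(32, 64),
    layers_per_block=1,
    sample_size=32,
    in_channels=4,
    out_channels=4,
    down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
    up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
    cross_attention_dim=32,
)

# add_adapter comes from diffusers' PeftAdapterMixin and requires peft to be installed.
unet.add_adapter(
    LoraConfig(r=4, lora_alpha=4, target_modules=["to_q", "to_k", "to_v", "to_out.0"])
)

# The LoRA suites check, among other things, that LoRA parameters were actually injected.
assert any("lora_" in name for name, _ in unet.named_parameters())
```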

.github/workflows/push_tests.yml

Lines changed: 3 additions & 3 deletions
@@ -161,11 +161,11 @@ jobs:
 
   flax_tpu_tests:
     name: Flax TPU Tests
-    runs-on: docker-tpu
+    runs-on:
+      group: gcp-ct5lp-hightpu-8t
     container:
       image: diffusers/diffusers-flax-tpu
-      options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ --privileged
-    defaults:
+      options: --shm-size "16gb" --ipc host --privileged ${{ vars.V5_LITEPOD_8_ENV}} -v /mnt/hf_cache:/mnt/hf_cache defaults:
       run:
         shell: bash
     steps:

docs/source/en/_toctree.yml

Lines changed: 6 additions & 0 deletions
@@ -274,6 +274,8 @@
       title: LatteTransformer3DModel
     - local: api/models/lumina_nextdit2d
       title: LuminaNextDiT2DModel
+    - local: api/models/ltx_video_transformer3d
+      title: LTXVideoTransformer3DModel
     - local: api/models/mochi_transformer3d
       title: MochiTransformer3DModel
     - local: api/models/pixart_transformer2d
@@ -312,6 +314,8 @@
       title: AutoencoderKLAllegro
     - local: api/models/autoencoderkl_cogvideox
       title: AutoencoderKLCogVideoX
+    - local: api/models/autoencoderkl_ltx_video
+      title: AutoencoderKLLTXVideo
     - local: api/models/autoencoderkl_mochi
       title: AutoencoderKLMochi
     - local: api/models/asymmetricautoencoderkl
@@ -408,6 +412,8 @@
       title: Latte
     - local: api/pipelines/ledits_pp
       title: LEDITS++
+    - local: api/pipelines/ltx_video
+      title: LTX
     - local: api/pipelines/lumina
       title: Lumina-T2X
     - local: api/pipelines/marigold
docs/source/en/api/models/autoencoderkl_ltx_video.md

Lines changed: 37 additions & 0 deletions
This file was added.

<!-- Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License. -->

# AutoencoderKLLTXVideo

The 3D variational autoencoder (VAE) model with KL loss used in [LTX](https://huggingface.co/Lightricks/LTX-Video) was introduced by Lightricks.

The model can be loaded with the following code snippet.

```python
import torch
from diffusers import AutoencoderKLLTXVideo

vae = AutoencoderKLLTXVideo.from_pretrained("TODO/TODO", subfolder="vae", torch_dtype=torch.float32).to("cuda")
```

## AutoencoderKLLTXVideo

[[autodoc]] AutoencoderKLLTXVideo
  - decode
  - encode
  - all

## AutoencoderKLOutput

[[autodoc]] models.autoencoders.autoencoder_kl.AutoencoderKLOutput

## DecoderOutput

[[autodoc]] models.autoencoders.vae.DecoderOutput
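As a usage sketch to go with the snippet above: a round trip through `encode` and `decode` on a random clip. The `Lightricks/LTX-Video` repo id (standing in for the `TODO/TODO` placeholder), the `(batch, channels, frames, height, width)` layout, and the frame/resolution choices are assumptions, not taken from this commit.

```python
# Hedged sketch: encode a short random clip with the 3D VAE and decode it back.
import torch
from diffusers import AutoencoderKLLTXVideo

vae = AutoencoderKLLTXVideo.from_pretrained(
    "Lightricks/LTX-Video",  # assumed diffusers-format repo; the doc above still says TODO/TODO
    subfolder="vae",
    torch_dtype=torch.float32,
).to("cuda")

# Assumed layout: (batch, channels, frames, height, width); 9 frames at 256 px chosen to
# line up with the VAE's assumed temporal (8x) and spatial (32x) compression factors.
video = torch.randn(1, 3, 9, 256, 256, device="cuda", dtype=torch.float32)

with torch.no_grad():
    latents = vae.encode(video).latent_dist.sample()  # AutoencoderKLOutput.latent_dist
    reconstruction = vae.decode(latents).sample       # DecoderOutput.sample
```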
docs/source/en/api/models/ltx_video_transformer3d.md

Lines changed: 30 additions & 0 deletions
This file was added.

<!-- Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License. -->

# LTXVideoTransformer3DModel

A Diffusion Transformer model for 3D data from [LTX](https://huggingface.co/Lightricks/LTX-Video) was introduced by Lightricks.

The model can be loaded with the following code snippet.

```python
import torch
from diffusers import LTXVideoTransformer3DModel

transformer = LTXVideoTransformer3DModel.from_pretrained("TODO/TODO", subfolder="transformer", torch_dtype=torch.bfloat16).to("cuda")
```

## LTXVideoTransformer3DModel

[[autodoc]] LTXVideoTransformer3DModel

## Transformer2DModelOutput

[[autodoc]] models.modeling_outputs.Transformer2DModelOutput
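And a sketch of handing a separately loaded transformer to the accompanying text-to-video pipeline (the commit adds an "LTX" pipeline entry to the toctree). The `LTXPipeline` class name, the `Lightricks/LTX-Video` checkpoint layout, and the call parameters are assumptions here, not something this diff shows.

```python
# Hedged sketch: plug a separately loaded transformer into the LTX text-to-video pipeline.
import torch
from diffusers import LTXPipeline, LTXVideoTransformer3DModel  # LTXPipeline name is assumed
from diffusers.utils import export_to_video

transformer = LTXVideoTransformer3DModel.from_pretrained(
    "Lightricks/LTX-Video", subfolder="transformer", torch_dtype=torch.bfloat16
)
pipe = LTXPipeline.from_pretrained(
    "Lightricks/LTX-Video", transformer=transformer, torch_dtype=torch.bfloat16
).to("cuda")

video = pipe(
    prompt="A chimpanzee typing on a vintage typewriter, cinematic lighting",
    num_frames=65,             # assumed to require the 8k+1 frame-count form
    num_inference_steps=30,
).frames[0]
export_to_video(video, "ltx_sample.mp4", fps=24)
```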
