Commit 1fdcedf

Merge branch 'main' into gguf-space
2 parents c22779a + f20aba3

File tree: 16 files changed, +1067 -151 lines changed
Lines changed: 141 additions & 0 deletions
@@ -0,0 +1,141 @@
name: Fast PR tests for Modular

on:
  pull_request:
    branches: [main]
    paths:
      - "src/diffusers/modular_pipelines/**.py"
      - "src/diffusers/models/modeling_utils.py"
      - "src/diffusers/models/model_loading_utils.py"
      - "src/diffusers/pipelines/pipeline_utils.py"
      - "src/diffusers/pipeline_loading_utils.py"
      - "src/diffusers/loaders/lora_base.py"
      - "src/diffusers/loaders/lora_pipeline.py"
      - "src/diffusers/loaders/peft.py"
      - "tests/modular_pipelines/**.py"
      - ".github/**.yml"
      - "utils/**.py"
      - "setup.py"
  push:
    branches:
      - ci-*

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

env:
  DIFFUSERS_IS_CI: yes
  HF_HUB_ENABLE_HF_TRANSFER: 1
  OMP_NUM_THREADS: 4
  MKL_NUM_THREADS: 4
  PYTEST_TIMEOUT: 60

jobs:
  check_code_quality:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: "3.10"
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install .[quality]
      - name: Check quality
        run: make quality
      - name: Check if failure
        if: ${{ failure() }}
        run: |
          echo "Quality check failed. Please ensure the right dependency versions are installed with 'pip install -e .[quality]' and run 'make style && make quality'" >> $GITHUB_STEP_SUMMARY

  check_repository_consistency:
    needs: check_code_quality
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: "3.10"
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install .[quality]
      - name: Check repo consistency
        run: |
          python utils/check_copies.py
          python utils/check_dummies.py
          python utils/check_support_list.py
          make deps_table_check_updated
      - name: Check if failure
        if: ${{ failure() }}
        run: |
          echo "Repo consistency check failed. Please ensure the right dependency versions are installed with 'pip install -e .[quality]' and run 'make fix-copies'" >> $GITHUB_STEP_SUMMARY

  run_fast_tests:
    needs: [check_code_quality, check_repository_consistency]
    strategy:
      fail-fast: false
      matrix:
        config:
          - name: Fast PyTorch Modular Pipeline CPU tests
            framework: pytorch_pipelines
            runner: aws-highmemory-32-plus
            image: diffusers/diffusers-pytorch-cpu
            report: torch_cpu_modular_pipelines

    name: ${{ matrix.config.name }}

    runs-on:
      group: ${{ matrix.config.runner }}

    container:
      image: ${{ matrix.config.image }}
      options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/

    defaults:
      run:
        shell: bash

    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e [quality,test]
          pip uninstall transformers -y && python -m uv pip install -U transformers@git+https://github.com/huggingface/transformers.git --no-deps
          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git --no-deps

      - name: Environment
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python utils/print_env.py

      - name: Run fast PyTorch Pipeline CPU tests
        if: ${{ matrix.config.framework == 'pytorch_pipelines' }}
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile \
            -s -v -k "not Flax and not Onnx" \
            --make-reports=tests_${{ matrix.config.report }} \
            tests/modular_pipelines

      - name: Failure short reports
        if: ${{ failure() }}
        run: cat reports/tests_${{ matrix.config.report }}_failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: pr_${{ matrix.config.framework }}_${{ matrix.config.report }}_test_reports
          path: reports
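Note: to approximate the run_fast_tests job locally, the pytest invocation above can be reproduced via pytest.main (a sketch, not a canonical entry point; it assumes pytest-xdist and the test extras are installed via pip install -e ".[quality,test]", and omits --make-reports, which comes from diffusers' own CI pytest tooling):

import sys
import pytest

sys.exit(pytest.main([
    "-n", "8",                      # pytest-xdist: 8 workers, like CI
    "--max-worker-restart=0",
    "--dist=loadfile",              # group tests by file per worker
    "-s", "-v",
    "-k", "not Flax and not Onnx",
    "tests/modular_pipelines",
]))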

setup.py

Lines changed: 1 addition & 1 deletion
@@ -116,7 +116,7 @@
     "librosa",
     "numpy",
     "parameterized",
-    "peft>=0.15.0",
+    "peft>=0.17.0",
     "protobuf>=3.20.3,<4",
     "pytest",
     "pytest-timeout",

src/diffusers/dependency_versions_table.py

Lines changed: 1 addition & 1 deletion
@@ -23,7 +23,7 @@
    "librosa": "librosa",
    "numpy": "numpy",
    "parameterized": "parameterized",
-    "peft": "peft>=0.15.0",
+    "peft": "peft>=0.17.0",
    "protobuf": "protobuf>=3.20.3,<4",
    "pytest": "pytest",
    "pytest-timeout": "pytest-timeout",

src/diffusers/loaders/lora_conversion_utils.py

Lines changed: 5 additions & 1 deletion
@@ -817,7 +817,11 @@ def _convert(original_key, diffusers_key, state_dict, new_state_dict):
     # has both `peft` and non-peft state dict.
     has_peft_state_dict = any(k.startswith("transformer.") for k in state_dict)
     if has_peft_state_dict:
-        state_dict = {k: v for k, v in state_dict.items() if k.startswith("transformer.")}
+        state_dict = {
+            k.replace("lora_down.weight", "lora_A.weight").replace("lora_up.weight", "lora_B.weight"): v
+            for k, v in state_dict.items()
+            if k.startswith("transformer.")
+        }
         return state_dict

     # Another weird one.

src/diffusers/loaders/peft.py

Lines changed: 3 additions & 1 deletion
@@ -320,7 +320,9 @@ def map_state_dict_for_hotswap(sd):
             # it to None
             incompatible_keys = None
         else:
-            inject_adapter_in_model(lora_config, self, adapter_name=adapter_name, **peft_kwargs)
+            inject_adapter_in_model(
+                lora_config, self, adapter_name=adapter_name, state_dict=state_dict, **peft_kwargs
+            )
             incompatible_keys = set_peft_model_state_dict(self, state_dict, adapter_name, **peft_kwargs)

         if self._prepare_lora_hotswap_kwargs is not None:

src/diffusers/modular_pipelines/modular_pipeline.py

Lines changed: 21 additions & 0 deletions
@@ -493,6 +493,22 @@ def combine_outputs(*named_output_lists: List[Tuple[str, List[OutputParam]]]) ->

         return list(combined_dict.values())

+    @property
+    def input_names(self) -> List[str]:
+        return [input_param.name for input_param in self.inputs]
+
+    @property
+    def intermediate_input_names(self) -> List[str]:
+        return [input_param.name for input_param in self.intermediate_inputs]
+
+    @property
+    def intermediate_output_names(self) -> List[str]:
+        return [output_param.name for output_param in self.intermediate_outputs]
+
+    @property
+    def output_names(self) -> List[str]:
+        return [output_param.name for output_param in self.outputs]
+

 class PipelineBlock(ModularPipelineBlocks):
     """
@@ -2839,3 +2855,8 @@ def _dict_to_component_spec(
            type_hint=type_hint,
            **spec_dict,
        )
+
+    def set_progress_bar_config(self, **kwargs):
+        for sub_block_name, sub_block in self.blocks.sub_blocks.items():
+            if hasattr(sub_block, "set_progress_bar_config"):
+                sub_block.set_progress_bar_config(**kwargs)
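Note: the four properties are thin name-listing conveniences over the existing param lists. A standalone mimic of the pattern (InputParam here is a stand-in for the real class in diffusers.modular_pipelines):

from dataclasses import dataclass
from typing import List

@dataclass
class InputParam:  # stand-in for the real diffusers class
    name: str

class Block:
    inputs = [InputParam("prompt"), InputParam("image")]

    @property
    def input_names(self) -> List[str]:
        return [input_param.name for input_param in self.inputs]

print(Block().input_names)  # ['prompt', 'image']

The new set_progress_bar_config simply fans its kwargs out to every sub-block that exposes the hook, so something like pipe.set_progress_bar_config(disable=True) should silence progress bars across all sub-blocks.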

src/diffusers/modular_pipelines/stable_diffusion_xl/before_denoise.py

Lines changed: 3 additions & 13 deletions
@@ -744,8 +744,6 @@ def prepare_latents_inpaint(
         timestep=None,
         is_strength_max=True,
         add_noise=True,
-        return_noise=False,
-        return_image_latents=False,
     ):
         shape = (
             batch_size,
@@ -768,7 +766,7 @@
         if image.shape[1] == 4:
             image_latents = image.to(device=device, dtype=dtype)
             image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1)
-        elif return_image_latents or (latents is None and not is_strength_max):
+        elif latents is None and not is_strength_max:
             image = image.to(device=device, dtype=dtype)
             image_latents = self._encode_vae_image(components, image=image, generator=generator)
             image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1)
@@ -786,13 +784,7 @@
             noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
             latents = image_latents.to(device)

-        outputs = (latents,)
-
-        if return_noise:
-            outputs += (noise,)
-
-        if return_image_latents:
-            outputs += (image_latents,)
+        outputs = (latents, noise, image_latents)

         return outputs

@@ -864,7 +856,7 @@ def __call__(self, components: StableDiffusionXLModularPipeline, state: Pipeline
         block_state.height = block_state.image_latents.shape[-2] * components.vae_scale_factor
         block_state.width = block_state.image_latents.shape[-1] * components.vae_scale_factor

-        block_state.latents, block_state.noise = self.prepare_latents_inpaint(
+        block_state.latents, block_state.noise, block_state.image_latents = self.prepare_latents_inpaint(
             components,
             block_state.batch_size * block_state.num_images_per_prompt,
             components.num_channels_latents,
@@ -878,8 +870,6 @@
             timestep=block_state.latent_timestep,
             is_strength_max=block_state.is_strength_max,
             add_noise=block_state.add_noise,
-            return_noise=True,
-            return_image_latents=False,
         )

         # 7. Prepare mask latent variables

src/diffusers/pipelines/qwenimage/pipeline_qwenimage.py

Lines changed: 1 addition & 1 deletion
@@ -201,7 +201,7 @@ def _get_qwen_prompt_embeds(
         txt = [template.format(e) for e in prompt]
         txt_tokens = self.tokenizer(
             txt, max_length=self.tokenizer_max_length + drop_idx, padding=True, truncation=True, return_tensors="pt"
-        ).to(self.device)
+        ).to(device)
         encoder_hidden_states = self.text_encoder(
             input_ids=txt_tokens.input_ids,
             attention_mask=txt_tokens.attention_mask,
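Note: this one-word change fixes a device-placement bug. _get_qwen_prompt_embeds receives a device argument resolved per call, and under model offloading the pipeline-level self.device may disagree with the device the text encoder actually executes on. A minimal stand-in for the pattern (plain tensors instead of the tokenizer's BatchEncoding, which exposes the same .to(device) API):

import torch

# Resolve the execution device per call rather than trusting self.device,
# which may report "cpu" while modules are dispatched elsewhere.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

input_ids = torch.tensor([[101, 2023, 102]])
input_ids = input_ids.to(device)  # mirrors txt_tokens.to(device) in the diff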

src/diffusers/utils/peft_utils.py

Lines changed: 0 additions & 38 deletions
@@ -197,20 +197,6 @@ def get_peft_kwargs(
         "lora_bias": lora_bias,
     }

-    # Example: try load FusionX LoRA into Wan VACE
-    exclude_modules = _derive_exclude_modules(model_state_dict, peft_state_dict, adapter_name)
-    if exclude_modules:
-        if not is_peft_version(">=", "0.14.0"):
-            msg = """
-It seems like there are certain modules that need to be excluded when initializing `LoraConfig`. Your current `peft`
-version doesn't support passing an `exclude_modules` to `LoraConfig`. Please update it by running `pip install -U
-peft`. For most cases, this can be completely ignored. But if it seems unexpected, please file an issue -
-https://github.com/huggingface/diffusers/issues/new
-"""
-            logger.debug(msg)
-        else:
-            lora_config_kwargs.update({"exclude_modules": exclude_modules})
-
     return lora_config_kwargs


@@ -388,27 +374,3 @@ def _maybe_warn_for_unhandled_keys(incompatible_keys, adapter_name):

     if warn_msg:
         logger.warning(warn_msg)
-
-
-def _derive_exclude_modules(model_state_dict, peft_state_dict, adapter_name=None):
-    """
-    Derives the modules to exclude while initializing `LoraConfig` through `exclude_modules`. It works by comparing the
-    `model_state_dict` and `peft_state_dict` and adds a module from `model_state_dict` to the exclusion set if it
-    doesn't exist in `peft_state_dict`.
-    """
-    if model_state_dict is None:
-        return
-    all_modules = set()
-    string_to_replace = f"{adapter_name}." if adapter_name else ""
-
-    for name in model_state_dict.keys():
-        if string_to_replace:
-            name = name.replace(string_to_replace, "")
-        if "." in name:
-            module_name = name.rsplit(".", 1)[0]
-            all_modules.add(module_name)
-
-    target_modules_set = {name.split(".lora")[0] for name in peft_state_dict.keys()}
-    exclude_modules = list(all_modules - target_modules_set)
-
-    return exclude_modules
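Note: the deleted _derive_exclude_modules built an exclude_modules list by set difference between module names in the model state dict and the LoRA targets; with the checkpoint state dict now forwarded to inject_adapter_in_model (see the src/diffusers/loaders/peft.py hunk above), that workaround appears redundant. For reference, a toy recap of the removed logic (keys invented):

model_state_dict = {
    "default.blocks.0.proj.weight": None,  # "default." is the adapter_name prefix
    "default.blocks.1.proj.weight": None,
}
peft_state_dict = {"blocks.0.proj.lora_A.weight": None}

adapter_name = "default"
all_modules = {
    name.replace(f"{adapter_name}.", "").rsplit(".", 1)[0]
    for name in model_state_dict
    if "." in name
}
target_modules_set = {name.split(".lora")[0] for name in peft_state_dict}
print(sorted(all_modules - target_modules_set))  # ['blocks.1.proj'] would have been excluded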
