Commit ccf2c31 — [Modular] Fast Tests (#11937)
Authored by yiyixuxu and DN6 · 1 parent 7b10e4a

Commit message:
* rearrange the params into groups: default params / image params / batch params / callback params
* make style
* add names property to pipeline blocks
* style
* remove more unused func
* prepare_latents_inpaint always returns noise and image_latents
* up (×2)
* update (×8)

Co-authored-by: DN6 <[email protected]>

File tree: 8 files changed, +1022 −39 lines changed
New GitHub Actions workflow file — Lines changed: 141 additions & 0 deletions
@@ -0,0 +1,141 @@
name: Fast PR tests for Modular

on:
  pull_request:
    branches: [main]
    paths:
      - "src/diffusers/modular_pipelines/**.py"
      - "src/diffusers/models/modeling_utils.py"
      - "src/diffusers/models/model_loading_utils.py"
      - "src/diffusers/pipelines/pipeline_utils.py"
      - "src/diffusers/pipeline_loading_utils.py"
      - "src/diffusers/loaders/lora_base.py"
      - "src/diffusers/loaders/lora_pipeline.py"
      - "src/diffusers/loaders/peft.py"
      - "tests/modular_pipelines/**.py"
      - ".github/**.yml"
      - "utils/**.py"
      - "setup.py"
  push:
    branches:
      - ci-*

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

env:
  DIFFUSERS_IS_CI: yes
  HF_HUB_ENABLE_HF_TRANSFER: 1
  OMP_NUM_THREADS: 4
  MKL_NUM_THREADS: 4
  PYTEST_TIMEOUT: 60

jobs:
  check_code_quality:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: "3.10"
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install .[quality]
      - name: Check quality
        run: make quality
      - name: Check if failure
        if: ${{ failure() }}
        run: |
          echo "Quality check failed. Please ensure the right dependency versions are installed with 'pip install -e .[quality]' and run 'make style && make quality'" >> $GITHUB_STEP_SUMMARY

  check_repository_consistency:
    needs: check_code_quality
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: "3.10"
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install .[quality]
      - name: Check repo consistency
        run: |
          python utils/check_copies.py
          python utils/check_dummies.py
          python utils/check_support_list.py
          make deps_table_check_updated
      - name: Check if failure
        if: ${{ failure() }}
        run: |
          echo "Repo consistency check failed. Please ensure the right dependency versions are installed with 'pip install -e .[quality]' and run 'make fix-copies'" >> $GITHUB_STEP_SUMMARY

  run_fast_tests:
    needs: [check_code_quality, check_repository_consistency]
    strategy:
      fail-fast: false
      matrix:
        config:
          - name: Fast PyTorch Modular Pipeline CPU tests
            framework: pytorch_pipelines
            runner: aws-highmemory-32-plus
            image: diffusers/diffusers-pytorch-cpu
            report: torch_cpu_modular_pipelines

    name: ${{ matrix.config.name }}

    runs-on:
      group: ${{ matrix.config.runner }}

    container:
      image: ${{ matrix.config.image }}
      options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/

    defaults:
      run:
        shell: bash

    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e [quality,test]
          pip uninstall transformers -y && python -m uv pip install -U transformers@git+https://github.com/huggingface/transformers.git --no-deps
          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git --no-deps

      - name: Environment
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python utils/print_env.py

      - name: Run fast PyTorch Pipeline CPU tests
        if: ${{ matrix.config.framework == 'pytorch_pipelines' }}
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile \
            -s -v -k "not Flax and not Onnx" \
            --make-reports=tests_${{ matrix.config.report }} \
            tests/modular_pipelines

      - name: Failure short reports
        if: ${{ failure() }}
        run: cat reports/tests_${{ matrix.config.report }}_failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: pr_${{ matrix.config.framework }}_${{ matrix.config.report }}_test_reports
          path: reports
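For a local reproduction of the `run_fast_tests` job, the pytest invocation from the workflow can be run directly against the modular test suite (assuming the repository is installed with the `[test]` extras and `pytest-xdist` is available; the `--make-reports` flag, which relies on the repository's own pytest tooling, is dropped here):

    python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -s -v -k "not Flax and not Onnx" tests/modular_pipelines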

src/diffusers/modular_pipelines/modular_pipeline.py

Lines changed: 21 additions & 0 deletions
@@ -493,6 +493,22 @@ def combine_outputs(*named_output_lists: List[Tuple[str, List[OutputParam]]]) ->
 
         return list(combined_dict.values())
 
+    @property
+    def input_names(self) -> List[str]:
+        return [input_param.name for input_param in self.inputs]
+
+    @property
+    def intermediate_input_names(self) -> List[str]:
+        return [input_param.name for input_param in self.intermediate_inputs]
+
+    @property
+    def intermediate_output_names(self) -> List[str]:
+        return [output_param.name for output_param in self.intermediate_outputs]
+
+    @property
+    def output_names(self) -> List[str]:
+        return [output_param.name for output_param in self.outputs]
+
 
 class PipelineBlock(ModularPipelineBlocks):
     """

@@ -2839,3 +2855,8 @@ def _dict_to_component_spec(
             type_hint=type_hint,
             **spec_dict,
         )
+
+    def set_progress_bar_config(self, **kwargs):
+        for sub_block_name, sub_block in self.blocks.sub_blocks.items():
+            if hasattr(sub_block, "set_progress_bar_config"):
+                sub_block.set_progress_bar_config(**kwargs)
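A rough usage sketch for the two additions above — `block` and `pipeline` are assumed placeholders (any ModularPipelineBlocks instance and a modular pipeline built from blocks), not objects defined in this diff:

    # Sketch only: inspect a block's parameter names via the new properties,
    # without iterating over the underlying parameter objects by hand.
    def describe_block(block):
        print("inputs:              ", block.input_names)
        print("intermediate inputs: ", block.intermediate_input_names)
        print("intermediate outputs:", block.intermediate_output_names)
        print("outputs:             ", block.output_names)

    # Sketch only: forward tqdm-style options (e.g. disable=True) to every
    # sub-block that supports them, as the new set_progress_bar_config does.
    def silence_progress_bars(pipeline):
        pipeline.set_progress_bar_config(disable=True)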

src/diffusers/modular_pipelines/stable_diffusion_xl/before_denoise.py

Lines changed: 3 additions & 13 deletions
@@ -744,8 +744,6 @@ def prepare_latents_inpaint(
         timestep=None,
         is_strength_max=True,
         add_noise=True,
-        return_noise=False,
-        return_image_latents=False,
     ):
         shape = (
             batch_size,
@@ -768,7 +766,7 @@ def prepare_latents_inpaint(
         if image.shape[1] == 4:
             image_latents = image.to(device=device, dtype=dtype)
             image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1)
-        elif return_image_latents or (latents is None and not is_strength_max):
+        elif latents is None and not is_strength_max:
             image = image.to(device=device, dtype=dtype)
             image_latents = self._encode_vae_image(components, image=image, generator=generator)
             image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1)
@@ -786,13 +784,7 @@ def prepare_latents_inpaint(
             noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
             latents = image_latents.to(device)
 
-        outputs = (latents,)
-
-        if return_noise:
-            outputs += (noise,)
-
-        if return_image_latents:
-            outputs += (image_latents,)
+        outputs = (latents, noise, image_latents)
 
         return outputs
 
@@ -864,7 +856,7 @@ def __call__(self, components: StableDiffusionXLModularPipeline, state: Pipeline
         block_state.height = block_state.image_latents.shape[-2] * components.vae_scale_factor
         block_state.width = block_state.image_latents.shape[-1] * components.vae_scale_factor
 
-        block_state.latents, block_state.noise = self.prepare_latents_inpaint(
+        block_state.latents, block_state.noise, block_state.image_latents = self.prepare_latents_inpaint(
            components,
            block_state.batch_size * block_state.num_images_per_prompt,
            components.num_channels_latents,
@@ -878,8 +870,6 @@ def __call__(self, components: StableDiffusionXLModularPipeline, state: Pipeline
            timestep=block_state.latent_timestep,
            is_strength_max=block_state.is_strength_max,
            add_noise=block_state.add_noise,
-            return_noise=True,
-            return_image_latents=False,
        )
 
        # 7. Prepare mask latent variables
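The caller-side effect of this change, sketched below: prepare_latents_inpaint now always returns the (latents, noise, image_latents) triple, so the return_noise / return_image_latents flags disappear and call sites unpack three values. The snippet mirrors the call site in the diff; the argument names shown are placeholders and the full argument list is elided:

    # Sketch only: the helper now always returns a 3-tuple, regardless of how
    # the latents were produced.
    latents, noise, image_latents = self.prepare_latents_inpaint(
        components,
        batch_size,              # placeholder for the real batch-size expression
        num_channels_latents,    # placeholder
        # ... remaining arguments unchanged ...
    )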

tests/modular_pipelines/__init__.py

Whitespace-only changes.

tests/modular_pipelines/stable_diffusion_xl/__init__.py

Whitespace-only changes.
