Commit 519ec44

Merge branch 'main' into sd2x_ldm_singlefile_fix

2 parents: c88b6c1 + 7513162

19 files changed: +8 −865 lines
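The bulk of this merge deletes a near-identical prompt-embeds test from eight Flux pipeline test files. A shared mixin is the usual way to keep such a check without per-file duplication; the sketch below is hypothetical (the FluxPromptEmbedsCheckMixin name and the actual shared replacement carried by the +8 added lines are assumptions, not shown in this excerpt), but its body is lifted verbatim from the deleted tests:

# Hypothetical consolidation sketch -- not the code added by this commit.
# Assumes a diffusers-style test class that provides pipeline_class,
# get_dummy_components, and get_dummy_inputs.
import numpy as np

from diffusers.utils.testing_utils import torch_device


class FluxPromptEmbedsCheckMixin:
    def test_prompt_embeds_match_prompt(self):
        pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)

        inputs = self.get_dummy_inputs(torch_device)
        output_with_prompt = pipe(**inputs).images[0]

        # Encode the prompt manually, then pass the embeddings instead.
        inputs = self.get_dummy_inputs(torch_device)
        prompt = inputs.pop("prompt")
        prompt_embeds, pooled_prompt_embeds, _text_ids = pipe.encode_prompt(
            prompt,
            prompt_2=None,
            device=torch_device,
            max_sequence_length=inputs["max_sequence_length"],
        )
        output_with_embeds = pipe(
            prompt_embeds=prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            **inputs,
        ).images[0]

        # Identical conditioning should reproduce the image almost exactly.
        assert np.abs(output_with_prompt - output_with_embeds).max() < 1e-4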

tests/pipelines/controlnet_flux/test_controlnet_flux_img2img.py
Lines changed: 0 additions & 24 deletions

@@ -158,30 +158,6 @@ def test_flux_controlnet_different_prompts(self):
 
         assert max_diff > 1e-6
 
-    def test_flux_controlnet_prompt_embeds(self):
-        pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
-        inputs = self.get_dummy_inputs(torch_device)
-
-        output_with_prompt = pipe(**inputs).images[0]
-
-        inputs = self.get_dummy_inputs(torch_device)
-        prompt = inputs.pop("prompt")
-
-        (prompt_embeds, pooled_prompt_embeds, text_ids) = pipe.encode_prompt(
-            prompt,
-            prompt_2=None,
-            device=torch_device,
-            max_sequence_length=inputs["max_sequence_length"],
-        )
-        output_with_embeds = pipe(
-            prompt_embeds=prompt_embeds,
-            pooled_prompt_embeds=pooled_prompt_embeds,
-            **inputs,
-        ).images[0]
-
-        max_diff = np.abs(output_with_prompt - output_with_embeds).max()
-        assert max_diff < 1e-4
-
     def test_fused_qkv_projections(self):
         device = "cpu"  # ensure determinism for the device-dependent torch.Generator
         components = self.get_dummy_components()

tests/pipelines/flux/test_pipeline_flux.py
Lines changed: 0 additions & 24 deletions

@@ -136,30 +136,6 @@ def test_flux_different_prompts(self):
         # For some reasons, they don't show large differences
         assert max_diff > 1e-6
 
-    def test_flux_prompt_embeds(self):
-        pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
-        inputs = self.get_dummy_inputs(torch_device)
-
-        output_with_prompt = pipe(**inputs).images[0]
-
-        inputs = self.get_dummy_inputs(torch_device)
-        prompt = inputs.pop("prompt")
-
-        (prompt_embeds, pooled_prompt_embeds, text_ids) = pipe.encode_prompt(
-            prompt,
-            prompt_2=None,
-            device=torch_device,
-            max_sequence_length=inputs["max_sequence_length"],
-        )
-        output_with_embeds = pipe(
-            prompt_embeds=prompt_embeds,
-            pooled_prompt_embeds=pooled_prompt_embeds,
-            **inputs,
-        ).images[0]
-
-        max_diff = np.abs(output_with_prompt - output_with_embeds).max()
-        assert max_diff < 1e-4
-
     def test_fused_qkv_projections(self):
         device = "cpu"  # ensure determinism for the device-dependent torch.Generator
         components = self.get_dummy_components()

tests/pipelines/flux/test_pipeline_flux_control.py
Lines changed: 0 additions & 24 deletions

@@ -126,30 +126,6 @@ def test_flux_different_prompts(self):
         # For some reasons, they don't show large differences
         assert max_diff > 1e-6
 
-    def test_flux_prompt_embeds(self):
-        pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
-        inputs = self.get_dummy_inputs(torch_device)
-
-        output_with_prompt = pipe(**inputs).images[0]
-
-        inputs = self.get_dummy_inputs(torch_device)
-        prompt = inputs.pop("prompt")
-
-        (prompt_embeds, pooled_prompt_embeds, text_ids) = pipe.encode_prompt(
-            prompt,
-            prompt_2=None,
-            device=torch_device,
-            max_sequence_length=inputs["max_sequence_length"],
-        )
-        output_with_embeds = pipe(
-            prompt_embeds=prompt_embeds,
-            pooled_prompt_embeds=pooled_prompt_embeds,
-            **inputs,
-        ).images[0]
-
-        max_diff = np.abs(output_with_prompt - output_with_embeds).max()
-        assert max_diff < 1e-4
-
     def test_fused_qkv_projections(self):
         device = "cpu"  # ensure determinism for the device-dependent torch.Generator
         components = self.get_dummy_components()

tests/pipelines/flux/test_pipeline_flux_control_img2img.py
Lines changed: 0 additions & 24 deletions

@@ -129,30 +129,6 @@ def test_flux_different_prompts(self):
         # For some reasons, they don't show large differences
         assert max_diff > 1e-6
 
-    def test_flux_prompt_embeds(self):
-        pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
-        inputs = self.get_dummy_inputs(torch_device)
-
-        output_with_prompt = pipe(**inputs).images[0]
-
-        inputs = self.get_dummy_inputs(torch_device)
-        prompt = inputs.pop("prompt")
-
-        (prompt_embeds, pooled_prompt_embeds, text_ids) = pipe.encode_prompt(
-            prompt,
-            prompt_2=None,
-            device=torch_device,
-            max_sequence_length=inputs["max_sequence_length"],
-        )
-        output_with_embeds = pipe(
-            prompt_embeds=prompt_embeds,
-            pooled_prompt_embeds=pooled_prompt_embeds,
-            **inputs,
-        ).images[0]
-
-        max_diff = np.abs(output_with_prompt - output_with_embeds).max()
-        assert max_diff < 1e-4
-
     def test_flux_image_output_shape(self):
         pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
         inputs = self.get_dummy_inputs(torch_device)

tests/pipelines/flux/test_pipeline_flux_control_inpaint.py
Lines changed: 0 additions & 40 deletions

@@ -120,46 +120,6 @@ def get_dummy_inputs(self, device, seed=0):
         }
         return inputs
 
-    # def test_flux_different_prompts(self):
-    #     pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
-
-    #     inputs = self.get_dummy_inputs(torch_device)
-    #     output_same_prompt = pipe(**inputs).images[0]
-
-    #     inputs = self.get_dummy_inputs(torch_device)
-    #     inputs["prompt_2"] = "a different prompt"
-    #     output_different_prompts = pipe(**inputs).images[0]
-
-    #     max_diff = np.abs(output_same_prompt - output_different_prompts).max()
-
-    #     # Outputs should be different here
-    #     # For some reasons, they don't show large differences
-    #     assert max_diff > 1e-6
-
-    def test_flux_prompt_embeds(self):
-        pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
-        inputs = self.get_dummy_inputs(torch_device)
-
-        output_with_prompt = pipe(**inputs).images[0]
-
-        inputs = self.get_dummy_inputs(torch_device)
-        prompt = inputs.pop("prompt")
-
-        (prompt_embeds, pooled_prompt_embeds, text_ids) = pipe.encode_prompt(
-            prompt,
-            prompt_2=None,
-            device=torch_device,
-            max_sequence_length=inputs["max_sequence_length"],
-        )
-        output_with_embeds = pipe(
-            prompt_embeds=prompt_embeds,
-            pooled_prompt_embeds=pooled_prompt_embeds,
-            **inputs,
-        ).images[0]
-
-        max_diff = np.abs(output_with_prompt - output_with_embeds).max()
-        assert max_diff < 1e-4
-
     def test_fused_qkv_projections(self):
         device = "cpu"  # ensure determinism for the device-dependent torch.Generator
         components = self.get_dummy_components()

tests/pipelines/flux/test_pipeline_flux_fill.py
Lines changed: 0 additions & 24 deletions

@@ -128,30 +128,6 @@ def test_flux_fill_different_prompts(self):
         # For some reasons, they don't show large differences
         assert max_diff > 1e-6
 
-    def test_flux_fill_prompt_embeds(self):
-        pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
-        inputs = self.get_dummy_inputs(torch_device)
-
-        output_with_prompt = pipe(**inputs).images[0]
-
-        inputs = self.get_dummy_inputs(torch_device)
-        prompt = inputs.pop("prompt")
-
-        (prompt_embeds, pooled_prompt_embeds, text_ids) = pipe.encode_prompt(
-            prompt,
-            prompt_2=None,
-            device=torch_device,
-            max_sequence_length=inputs["max_sequence_length"],
-        )
-        output_with_embeds = pipe(
-            prompt_embeds=prompt_embeds,
-            pooled_prompt_embeds=pooled_prompt_embeds,
-            **inputs,
-        ).images[0]
-
-        max_diff = np.abs(output_with_prompt - output_with_embeds).max()
-        assert max_diff < 1e-4
-
     def test_flux_image_output_shape(self):
         pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
         inputs = self.get_dummy_inputs(torch_device)

tests/pipelines/flux/test_pipeline_flux_img2img.py
Lines changed: 0 additions & 24 deletions

@@ -126,30 +126,6 @@ def test_flux_different_prompts(self):
         # For some reasons, they don't show large differences
         assert max_diff > 1e-6
 
-    def test_flux_prompt_embeds(self):
-        pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
-        inputs = self.get_dummy_inputs(torch_device)
-
-        output_with_prompt = pipe(**inputs).images[0]
-
-        inputs = self.get_dummy_inputs(torch_device)
-        prompt = inputs.pop("prompt")
-
-        (prompt_embeds, pooled_prompt_embeds, text_ids) = pipe.encode_prompt(
-            prompt,
-            prompt_2=None,
-            device=torch_device,
-            max_sequence_length=inputs["max_sequence_length"],
-        )
-        output_with_embeds = pipe(
-            prompt_embeds=prompt_embeds,
-            pooled_prompt_embeds=pooled_prompt_embeds,
-            **inputs,
-        ).images[0]
-
-        max_diff = np.abs(output_with_prompt - output_with_embeds).max()
-        assert max_diff < 1e-4
-
     def test_flux_image_output_shape(self):
         pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
         inputs = self.get_dummy_inputs(torch_device)

tests/pipelines/flux/test_pipeline_flux_inpaint.py
Lines changed: 0 additions & 24 deletions

@@ -128,30 +128,6 @@ def test_flux_inpaint_different_prompts(self):
         # For some reasons, they don't show large differences
         assert max_diff > 1e-6
 
-    def test_flux_inpaint_prompt_embeds(self):
-        pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
-        inputs = self.get_dummy_inputs(torch_device)
-
-        output_with_prompt = pipe(**inputs).images[0]
-
-        inputs = self.get_dummy_inputs(torch_device)
-        prompt = inputs.pop("prompt")
-
-        (prompt_embeds, pooled_prompt_embeds, text_ids) = pipe.encode_prompt(
-            prompt,
-            prompt_2=None,
-            device=torch_device,
-            max_sequence_length=inputs["max_sequence_length"],
-        )
-        output_with_embeds = pipe(
-            prompt_embeds=prompt_embeds,
-            pooled_prompt_embeds=pooled_prompt_embeds,
-            **inputs,
-        ).images[0]
-
-        max_diff = np.abs(output_with_prompt - output_with_embeds).max()
-        assert max_diff < 1e-4
-
     def test_flux_image_output_shape(self):
         pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
         inputs = self.get_dummy_inputs(torch_device)

tests/pipelines/hunyuandit/test_hunyuan_dit.py
Lines changed: 2 additions & 94 deletions

@@ -14,7 +14,6 @@
 # limitations under the License.
 
 import gc
-import tempfile
 import unittest
 
 import numpy as np
@@ -128,10 +127,12 @@ def test_inference(self):
         max_diff = np.abs(image_slice.flatten() - expected_slice).max()
         self.assertLessEqual(max_diff, 1e-3)
 
+    @unittest.skip("Not supported.")
     def test_sequential_cpu_offload_forward_pass(self):
         # TODO(YiYi) need to fix later
         pass
 
+    @unittest.skip("Not supported.")
     def test_sequential_offload_forward_pass_twice(self):
         # TODO(YiYi) need to fix later
         pass
@@ -141,99 +142,6 @@ def test_inference_batch_single_identical(self):
             expected_max_diff=1e-3,
         )
 
-    def test_save_load_optional_components(self):
-        components = self.get_dummy_components()
-        pipe = self.pipeline_class(**components)
-        pipe.to(torch_device)
-        pipe.set_progress_bar_config(disable=None)
-
-        inputs = self.get_dummy_inputs(torch_device)
-
-        prompt = inputs["prompt"]
-        generator = inputs["generator"]
-        num_inference_steps = inputs["num_inference_steps"]
-        output_type = inputs["output_type"]
-
-        (
-            prompt_embeds,
-            negative_prompt_embeds,
-            prompt_attention_mask,
-            negative_prompt_attention_mask,
-        ) = pipe.encode_prompt(prompt, device=torch_device, dtype=torch.float32, text_encoder_index=0)
-
-        (
-            prompt_embeds_2,
-            negative_prompt_embeds_2,
-            prompt_attention_mask_2,
-            negative_prompt_attention_mask_2,
-        ) = pipe.encode_prompt(
-            prompt,
-            device=torch_device,
-            dtype=torch.float32,
-            text_encoder_index=1,
-        )
-
-        # inputs with prompt converted to embeddings
-        inputs = {
-            "prompt_embeds": prompt_embeds,
-            "prompt_attention_mask": prompt_attention_mask,
-            "negative_prompt_embeds": negative_prompt_embeds,
-            "negative_prompt_attention_mask": negative_prompt_attention_mask,
-            "prompt_embeds_2": prompt_embeds_2,
-            "prompt_attention_mask_2": prompt_attention_mask_2,
-            "negative_prompt_embeds_2": negative_prompt_embeds_2,
-            "negative_prompt_attention_mask_2": negative_prompt_attention_mask_2,
-            "generator": generator,
-            "num_inference_steps": num_inference_steps,
-            "output_type": output_type,
-            "use_resolution_binning": False,
-        }
-
-        # set all optional components to None
-        for optional_component in pipe._optional_components:
-            setattr(pipe, optional_component, None)
-
-        output = pipe(**inputs)[0]
-
-        with tempfile.TemporaryDirectory() as tmpdir:
-            pipe.save_pretrained(tmpdir)
-            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
-            pipe_loaded.to(torch_device)
-            pipe_loaded.set_progress_bar_config(disable=None)
-
-        for optional_component in pipe._optional_components:
-            self.assertTrue(
-                getattr(pipe_loaded, optional_component) is None,
-                f"`{optional_component}` did not stay set to None after loading.",
-            )
-
-        inputs = self.get_dummy_inputs(torch_device)
-
-        generator = inputs["generator"]
-        num_inference_steps = inputs["num_inference_steps"]
-        output_type = inputs["output_type"]
-
-        # inputs with prompt converted to embeddings
-        inputs = {
-            "prompt_embeds": prompt_embeds,
-            "prompt_attention_mask": prompt_attention_mask,
-            "negative_prompt_embeds": negative_prompt_embeds,
-            "negative_prompt_attention_mask": negative_prompt_attention_mask,
-            "prompt_embeds_2": prompt_embeds_2,
-            "prompt_attention_mask_2": prompt_attention_mask_2,
-            "negative_prompt_embeds_2": negative_prompt_embeds_2,
-            "negative_prompt_attention_mask_2": negative_prompt_attention_mask_2,
-            "generator": generator,
-            "num_inference_steps": num_inference_steps,
-            "output_type": output_type,
-            "use_resolution_binning": False,
-        }
-
-        output_loaded = pipe_loaded(**inputs)[0]
-
-        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
-        self.assertLess(max_diff, 1e-4)
-
     def test_feed_forward_chunking(self):
         device = "cpu"
 

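The two @unittest.skip lines added above replace silent no-op overrides: a test whose body is just pass is reported as passed even though nothing ran, while @unittest.skip reports the test as skipped together with the stated reason. A minimal, self-contained illustration (hypothetical class and test names):

import unittest


class SkipDemo(unittest.TestCase):
    def test_silently_green(self):
        pass  # reported as PASSED, although nothing was exercised

    @unittest.skip("Not supported.")
    def test_visibly_skipped(self):
        pass  # reported as SKIPPED, with the reason shown in the output


if __name__ == "__main__":
    unittest.main()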