Merged
24 changes: 0 additions & 24 deletions tests/pipelines/controlnet_flux/test_controlnet_flux_img2img.py
@@ -158,30 +158,6 @@ def test_flux_controlnet_different_prompts(self):
 
         assert max_diff > 1e-6
 
-    def test_flux_controlnet_prompt_embeds(self):
-        pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
-        inputs = self.get_dummy_inputs(torch_device)
-
-        output_with_prompt = pipe(**inputs).images[0]
-
-        inputs = self.get_dummy_inputs(torch_device)
-        prompt = inputs.pop("prompt")
-
-        (prompt_embeds, pooled_prompt_embeds, text_ids) = pipe.encode_prompt(
-            prompt,
-            prompt_2=None,
-            device=torch_device,
-            max_sequence_length=inputs["max_sequence_length"],
-        )
-        output_with_embeds = pipe(
-            prompt_embeds=prompt_embeds,
-            pooled_prompt_embeds=pooled_prompt_embeds,
-            **inputs,
-        ).images[0]
-
-        max_diff = np.abs(output_with_prompt - output_with_embeds).max()
-        assert max_diff < 1e-4
-
     def test_fused_qkv_projections(self):
         device = "cpu"  # ensure determinism for the device-dependent torch.Generator
         components = self.get_dummy_components()
24 changes: 0 additions & 24 deletions tests/pipelines/flux/test_pipeline_flux.py
@@ -136,30 +136,6 @@ def test_flux_different_prompts(self):
         # For some reasons, they don't show large differences
         assert max_diff > 1e-6
 
-    def test_flux_prompt_embeds(self):
-        pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
-        inputs = self.get_dummy_inputs(torch_device)
-
-        output_with_prompt = pipe(**inputs).images[0]
-
-        inputs = self.get_dummy_inputs(torch_device)
-        prompt = inputs.pop("prompt")
-
-        (prompt_embeds, pooled_prompt_embeds, text_ids) = pipe.encode_prompt(
-            prompt,
-            prompt_2=None,
-            device=torch_device,
-            max_sequence_length=inputs["max_sequence_length"],
-        )
-        output_with_embeds = pipe(
-            prompt_embeds=prompt_embeds,
-            pooled_prompt_embeds=pooled_prompt_embeds,
-            **inputs,
-        ).images[0]
-
-        max_diff = np.abs(output_with_prompt - output_with_embeds).max()
-        assert max_diff < 1e-4
-
     def test_fused_qkv_projections(self):
         device = "cpu"  # ensure determinism for the device-dependent torch.Generator
         components = self.get_dummy_components()
24 changes: 0 additions & 24 deletions tests/pipelines/flux/test_pipeline_flux_control.py
@@ -126,30 +126,6 @@ def test_flux_different_prompts(self):
         # For some reasons, they don't show large differences
         assert max_diff > 1e-6
 
-    def test_flux_prompt_embeds(self):
-        pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
-        inputs = self.get_dummy_inputs(torch_device)
-
-        output_with_prompt = pipe(**inputs).images[0]
-
-        inputs = self.get_dummy_inputs(torch_device)
-        prompt = inputs.pop("prompt")
-
-        (prompt_embeds, pooled_prompt_embeds, text_ids) = pipe.encode_prompt(
-            prompt,
-            prompt_2=None,
-            device=torch_device,
-            max_sequence_length=inputs["max_sequence_length"],
-        )
-        output_with_embeds = pipe(
-            prompt_embeds=prompt_embeds,
-            pooled_prompt_embeds=pooled_prompt_embeds,
-            **inputs,
-        ).images[0]
-
-        max_diff = np.abs(output_with_prompt - output_with_embeds).max()
-        assert max_diff < 1e-4
-
     def test_fused_qkv_projections(self):
         device = "cpu"  # ensure determinism for the device-dependent torch.Generator
         components = self.get_dummy_components()
24 changes: 0 additions & 24 deletions tests/pipelines/flux/test_pipeline_flux_control_img2img.py
@@ -129,30 +129,6 @@ def test_flux_different_prompts(self):
         # For some reasons, they don't show large differences
         assert max_diff > 1e-6
 
-    def test_flux_prompt_embeds(self):
-        pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
-        inputs = self.get_dummy_inputs(torch_device)
-
-        output_with_prompt = pipe(**inputs).images[0]
-
-        inputs = self.get_dummy_inputs(torch_device)
-        prompt = inputs.pop("prompt")
-
-        (prompt_embeds, pooled_prompt_embeds, text_ids) = pipe.encode_prompt(
-            prompt,
-            prompt_2=None,
-            device=torch_device,
-            max_sequence_length=inputs["max_sequence_length"],
-        )
-        output_with_embeds = pipe(
-            prompt_embeds=prompt_embeds,
-            pooled_prompt_embeds=pooled_prompt_embeds,
-            **inputs,
-        ).images[0]
-
-        max_diff = np.abs(output_with_prompt - output_with_embeds).max()
-        assert max_diff < 1e-4
-
     def test_flux_image_output_shape(self):
         pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
         inputs = self.get_dummy_inputs(torch_device)
40 changes: 0 additions & 40 deletions tests/pipelines/flux/test_pipeline_flux_control_inpaint.py
@@ -120,46 +120,6 @@ def get_dummy_inputs(self, device, seed=0):
         }
         return inputs
 
-    # def test_flux_different_prompts(self):
-    #     pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
-
-    #     inputs = self.get_dummy_inputs(torch_device)
-    #     output_same_prompt = pipe(**inputs).images[0]
-
-    #     inputs = self.get_dummy_inputs(torch_device)
-    #     inputs["prompt_2"] = "a different prompt"
-    #     output_different_prompts = pipe(**inputs).images[0]
-
-    #     max_diff = np.abs(output_same_prompt - output_different_prompts).max()
-
-    #     # Outputs should be different here
-    #     # For some reasons, they don't show large differences
-    #     assert max_diff > 1e-6
-
-    def test_flux_prompt_embeds(self):
-        pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
-        inputs = self.get_dummy_inputs(torch_device)
-
-        output_with_prompt = pipe(**inputs).images[0]
-
-        inputs = self.get_dummy_inputs(torch_device)
-        prompt = inputs.pop("prompt")
-
-        (prompt_embeds, pooled_prompt_embeds, text_ids) = pipe.encode_prompt(
-            prompt,
-            prompt_2=None,
-            device=torch_device,
-            max_sequence_length=inputs["max_sequence_length"],
-        )
-        output_with_embeds = pipe(
-            prompt_embeds=prompt_embeds,
-            pooled_prompt_embeds=pooled_prompt_embeds,
-            **inputs,
-        ).images[0]
-
-        max_diff = np.abs(output_with_prompt - output_with_embeds).max()
-        assert max_diff < 1e-4
-
     def test_fused_qkv_projections(self):
         device = "cpu"  # ensure determinism for the device-dependent torch.Generator
         components = self.get_dummy_components()
24 changes: 0 additions & 24 deletions tests/pipelines/flux/test_pipeline_flux_fill.py
@@ -128,30 +128,6 @@ def test_flux_fill_different_prompts(self):
         # For some reasons, they don't show large differences
         assert max_diff > 1e-6
 
-    def test_flux_fill_prompt_embeds(self):
-        pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
-        inputs = self.get_dummy_inputs(torch_device)
-
-        output_with_prompt = pipe(**inputs).images[0]
-
-        inputs = self.get_dummy_inputs(torch_device)
-        prompt = inputs.pop("prompt")
-
-        (prompt_embeds, pooled_prompt_embeds, text_ids) = pipe.encode_prompt(
-            prompt,
-            prompt_2=None,
-            device=torch_device,
-            max_sequence_length=inputs["max_sequence_length"],
-        )
-        output_with_embeds = pipe(
-            prompt_embeds=prompt_embeds,
-            pooled_prompt_embeds=pooled_prompt_embeds,
-            **inputs,
-        ).images[0]
-
-        max_diff = np.abs(output_with_prompt - output_with_embeds).max()
-        assert max_diff < 1e-4
-
     def test_flux_image_output_shape(self):
         pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
         inputs = self.get_dummy_inputs(torch_device)
24 changes: 0 additions & 24 deletions tests/pipelines/flux/test_pipeline_flux_img2img.py
@@ -126,30 +126,6 @@ def test_flux_different_prompts(self):
         # For some reasons, they don't show large differences
         assert max_diff > 1e-6
 
-    def test_flux_prompt_embeds(self):
-        pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
-        inputs = self.get_dummy_inputs(torch_device)
-
-        output_with_prompt = pipe(**inputs).images[0]
-
-        inputs = self.get_dummy_inputs(torch_device)
-        prompt = inputs.pop("prompt")
-
-        (prompt_embeds, pooled_prompt_embeds, text_ids) = pipe.encode_prompt(
-            prompt,
-            prompt_2=None,
-            device=torch_device,
-            max_sequence_length=inputs["max_sequence_length"],
-        )
-        output_with_embeds = pipe(
-            prompt_embeds=prompt_embeds,
-            pooled_prompt_embeds=pooled_prompt_embeds,
-            **inputs,
-        ).images[0]
-
-        max_diff = np.abs(output_with_prompt - output_with_embeds).max()
-        assert max_diff < 1e-4
-
     def test_flux_image_output_shape(self):
         pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
         inputs = self.get_dummy_inputs(torch_device)
24 changes: 0 additions & 24 deletions tests/pipelines/flux/test_pipeline_flux_inpaint.py
@@ -128,30 +128,6 @@ def test_flux_inpaint_different_prompts(self):
         # For some reasons, they don't show large differences
         assert max_diff > 1e-6
 
-    def test_flux_inpaint_prompt_embeds(self):
-        pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
-        inputs = self.get_dummy_inputs(torch_device)
-
-        output_with_prompt = pipe(**inputs).images[0]
-
-        inputs = self.get_dummy_inputs(torch_device)
-        prompt = inputs.pop("prompt")
-
-        (prompt_embeds, pooled_prompt_embeds, text_ids) = pipe.encode_prompt(
-            prompt,
-            prompt_2=None,
-            device=torch_device,
-            max_sequence_length=inputs["max_sequence_length"],
-        )
-        output_with_embeds = pipe(
-            prompt_embeds=prompt_embeds,
-            pooled_prompt_embeds=pooled_prompt_embeds,
-            **inputs,
-        ).images[0]
-
-        max_diff = np.abs(output_with_prompt - output_with_embeds).max()
-        assert max_diff < 1e-4
-
     def test_flux_image_output_shape(self):
         pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
         inputs = self.get_dummy_inputs(torch_device)
96 changes: 2 additions & 94 deletions tests/pipelines/hunyuandit/test_hunyuan_dit.py
@@ -14,7 +14,6 @@
 # limitations under the License.
 
 import gc
-import tempfile
 import unittest
 
 import numpy as np
@@ -128,10 +127,12 @@ def test_inference(self):
         max_diff = np.abs(image_slice.flatten() - expected_slice).max()
         self.assertLessEqual(max_diff, 1e-3)
 
+    @unittest.skip("Not supported.")
     def test_sequential_cpu_offload_forward_pass(self):
         # TODO(YiYi) need to fix later
         pass
 
+    @unittest.skip("Not supported.")
     def test_sequential_offload_forward_pass_twice(self):
         # TODO(YiYi) need to fix later
         pass
@@ -141,99 +142,6 @@ def test_inference_batch_single_identical(self):
             expected_max_diff=1e-3,
         )
 
-    def test_save_load_optional_components(self):
-        components = self.get_dummy_components()
-        pipe = self.pipeline_class(**components)
-        pipe.to(torch_device)
-        pipe.set_progress_bar_config(disable=None)
-
-        inputs = self.get_dummy_inputs(torch_device)
-
-        prompt = inputs["prompt"]
-        generator = inputs["generator"]
-        num_inference_steps = inputs["num_inference_steps"]
-        output_type = inputs["output_type"]
-
-        (
-            prompt_embeds,
-            negative_prompt_embeds,
-            prompt_attention_mask,
-            negative_prompt_attention_mask,
-        ) = pipe.encode_prompt(prompt, device=torch_device, dtype=torch.float32, text_encoder_index=0)
-
-        (
-            prompt_embeds_2,
-            negative_prompt_embeds_2,
-            prompt_attention_mask_2,
-            negative_prompt_attention_mask_2,
-        ) = pipe.encode_prompt(
-            prompt,
-            device=torch_device,
-            dtype=torch.float32,
-            text_encoder_index=1,
-        )
-
-        # inputs with prompt converted to embeddings
-        inputs = {
-            "prompt_embeds": prompt_embeds,
-            "prompt_attention_mask": prompt_attention_mask,
-            "negative_prompt_embeds": negative_prompt_embeds,
-            "negative_prompt_attention_mask": negative_prompt_attention_mask,
-            "prompt_embeds_2": prompt_embeds_2,
-            "prompt_attention_mask_2": prompt_attention_mask_2,
-            "negative_prompt_embeds_2": negative_prompt_embeds_2,
-            "negative_prompt_attention_mask_2": negative_prompt_attention_mask_2,
-            "generator": generator,
-            "num_inference_steps": num_inference_steps,
-            "output_type": output_type,
-            "use_resolution_binning": False,
-        }
-
-        # set all optional components to None
-        for optional_component in pipe._optional_components:
-            setattr(pipe, optional_component, None)
-
-        output = pipe(**inputs)[0]
-
-        with tempfile.TemporaryDirectory() as tmpdir:
-            pipe.save_pretrained(tmpdir)
-            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
-            pipe_loaded.to(torch_device)
-            pipe_loaded.set_progress_bar_config(disable=None)
-
-        for optional_component in pipe._optional_components:
-            self.assertTrue(
-                getattr(pipe_loaded, optional_component) is None,
-                f"`{optional_component}` did not stay set to None after loading.",
-            )
-
-        inputs = self.get_dummy_inputs(torch_device)
-
-        generator = inputs["generator"]
-        num_inference_steps = inputs["num_inference_steps"]
-        output_type = inputs["output_type"]
-
-        # inputs with prompt converted to embeddings
-        inputs = {
-            "prompt_embeds": prompt_embeds,
-            "prompt_attention_mask": prompt_attention_mask,
-            "negative_prompt_embeds": negative_prompt_embeds,
-            "negative_prompt_attention_mask": negative_prompt_attention_mask,
-            "prompt_embeds_2": prompt_embeds_2,
-            "prompt_attention_mask_2": prompt_attention_mask_2,
-            "negative_prompt_embeds_2": negative_prompt_embeds_2,
-            "negative_prompt_attention_mask_2": negative_prompt_attention_mask_2,
-            "generator": generator,
-            "num_inference_steps": num_inference_steps,
-            "output_type": output_type,
-            "use_resolution_binning": False,
-        }
-
-        output_loaded = pipe_loaded(**inputs)[0]
-
-        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
-        self.assertLess(max_diff, 1e-4)
-
     def test_feed_forward_chunking(self):
         device = "cpu"
 