Skip to content

Commit 0978b60

Browse files
Ednaordinary authored and DN6 committed
fix tests
1 parent 4e24f26 commit 0978b60

File tree

4 files changed

+16
-3
lines changed

4 files changed

+16
-3
lines changed

src/diffusers/pipelines/chroma/pipeline_chroma.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -675,6 +675,13 @@ def __call__(
675675
batch_size = 1
676676
elif prompt is not None and isinstance(prompt, list):
677677
batch_size = len(prompt)
678+
if negative_prompt is not None and isinstance(negative_prompt, str):
679+
negative_prompt = [negative_prompt] * batch_size
680+
elif negative_prompt is not None and isinstance(negative_prompt, list):
681+
if len(negative_prompt) == 1:
682+
negative_prompt = [negative_prompt] * batch_size
683+
else:
684+
raise ValueError("prompt and negative_prompt are lists of unequal size")
678685
else:
679686
batch_size = prompt_embeds.shape[0]
680687

tests/models/transformers/test_models_transformer_chroma.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -82,7 +82,7 @@ class ChromaTransformerTests(ModelTesterMixin, unittest.TestCase):
8282
model_class = ChromaTransformer2DModel
8383
main_input_name = "hidden_states"
8484
# We override the items here because the transformer under consideration is small.
85-
model_split_percents = [0.7, 0.6, 0.6]
85+
model_split_percents = [0.8, 0.7, 0.7]
8686

8787
# Skip setting testing with default: AttnProcessor
8888
uses_custom_attn_processor = True

tests/models/transformers/test_models_transformer_flux.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -57,7 +57,9 @@ def create_flux_ip_adapter_state_dict(model):
5757

5858
image_projection = ImageProjection(
5959
cross_attention_dim=model.config["joint_attention_dim"],
60-
image_embed_dim=model.config["pooled_projection_dim"],
60+
image_embed_dim=(
61+
model.config["pooled_projection_dim"] if "pooled_projection_dim" in model.config.keys() else 768
62+
),
6163
num_image_text_embeds=4,
6264
)
6365

tests/pipelines/test_pipelines_common.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -544,7 +544,11 @@ def test_ip_adapter(self, expected_max_diff: float = 1e-4, expected_pipe_slice=N
544544
components = self.get_dummy_components()
545545
pipe = self.pipeline_class(**components).to(torch_device)
546546
pipe.set_progress_bar_config(disable=None)
547-
image_embed_dim = pipe.transformer.config.pooled_projection_dim or 768
547+
image_embed_dim = (
548+
pipe.transformer.config.pooled_projection_dim
549+
if hasattr(pipe.transformer.config, "pooled_projection_dim")
550+
else 768
551+
)
548552

549553
# forward pass without ip adapter
550554
inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device))

0 commit comments

Comments (0)