
Commit f1d445b

Revert "fix-copies went uncaught it seems."
This reverts commit eefb302.
1 parent: e9ec593

File tree: 3 files changed (+21, -36 lines)

src/diffusers/pipelines/flux/pipeline_flux_controlnet.py
Lines changed: 7 additions & 12 deletions

@@ -440,28 +440,23 @@ def prepare_ip_adapter_image_embeds(
             if not isinstance(ip_adapter_image, list):
                 ip_adapter_image = [ip_adapter_image]
 
-            if len(ip_adapter_image) != self.transformer.encoder_hid_proj.num_ip_adapters:
+            if len(ip_adapter_image) != len(self.transformer.encoder_hid_proj.image_projection_layers):
                 raise ValueError(
-                    f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {self.transformer.encoder_hid_proj.num_ip_adapters} IP Adapters."
+                    f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.transformer.encoder_hid_proj.image_projection_layers)} IP Adapters."
                 )
 
-            for single_ip_adapter_image in ip_adapter_image:
+            for single_ip_adapter_image, image_proj_layer in zip(
+                ip_adapter_image, self.transformer.encoder_hid_proj.image_projection_layers
+            ):
                 single_image_embeds = self.encode_image(single_ip_adapter_image, device, 1)
+
                 image_embeds.append(single_image_embeds[None, :])
         else:
-            if not isinstance(ip_adapter_image_embeds, list):
-                ip_adapter_image_embeds = [ip_adapter_image_embeds]
-
-            if len(ip_adapter_image_embeds) != self.transformer.encoder_hid_proj.num_ip_adapters:
-                raise ValueError(
-                    f"`ip_adapter_image_embeds` must have same length as the number of IP Adapters. Got {len(ip_adapter_image_embeds)} image embeds and {self.transformer.encoder_hid_proj.num_ip_adapters} IP Adapters."
-                )
-
             for single_image_embeds in ip_adapter_image_embeds:
                 image_embeds.append(single_image_embeds)
 
         ip_adapter_image_embeds = []
-        for single_image_embeds in image_embeds:
+        for i, single_image_embeds in enumerate(image_embeds):
             single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0)
             single_image_embeds = single_image_embeds.to(device=device)
             ip_adapter_image_embeds.append(single_image_embeds)
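
The same hunk is repeated in the img2img and inpaint pipelines below. For context, here is a minimal standalone sketch of the validation and zip pairing this revert restores; _EncoderHidProj and prepare_images are hypothetical stand-ins used only for illustration, not the diffusers implementation.

# Hypothetical stand-in for the transformer's encoder_hid_proj: one image
# projection layer per loaded IP Adapter.
class _EncoderHidProj:
    def __init__(self, num_layers):
        self.image_projection_layers = [object() for _ in range(num_layers)]


def prepare_images(ip_adapter_image, encoder_hid_proj):
    if not isinstance(ip_adapter_image, list):
        ip_adapter_image = [ip_adapter_image]

    # the restored check: one input image (or image list) per IP Adapter
    if len(ip_adapter_image) != len(encoder_hid_proj.image_projection_layers):
        raise ValueError(
            f"`ip_adapter_image` must have same length as the number of IP Adapters. "
            f"Got {len(ip_adapter_image)} images and "
            f"{len(encoder_hid_proj.image_projection_layers)} IP Adapters."
        )

    # the restored loop pairs each image with its projection layer via zip()
    for single_ip_adapter_image, image_proj_layer in zip(
        ip_adapter_image, encoder_hid_proj.image_projection_layers
    ):
        print(single_ip_adapter_image, image_proj_layer)


prepare_images(["img_a", "img_b"], _EncoderHidProj(num_layers=2))    # passes
# prepare_images(["img_a"], _EncoderHidProj(num_layers=2))           # raises ValueError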

src/diffusers/pipelines/flux/pipeline_flux_img2img.py
Lines changed: 7 additions & 12 deletions

@@ -427,28 +427,23 @@ def prepare_ip_adapter_image_embeds(
             if not isinstance(ip_adapter_image, list):
                 ip_adapter_image = [ip_adapter_image]
 
-            if len(ip_adapter_image) != self.transformer.encoder_hid_proj.num_ip_adapters:
+            if len(ip_adapter_image) != len(self.transformer.encoder_hid_proj.image_projection_layers):
                 raise ValueError(
-                    f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {self.transformer.encoder_hid_proj.num_ip_adapters} IP Adapters."
+                    f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.transformer.encoder_hid_proj.image_projection_layers)} IP Adapters."
                 )
 
-            for single_ip_adapter_image in ip_adapter_image:
+            for single_ip_adapter_image, image_proj_layer in zip(
+                ip_adapter_image, self.transformer.encoder_hid_proj.image_projection_layers
+            ):
                 single_image_embeds = self.encode_image(single_ip_adapter_image, device, 1)
+
                 image_embeds.append(single_image_embeds[None, :])
         else:
-            if not isinstance(ip_adapter_image_embeds, list):
-                ip_adapter_image_embeds = [ip_adapter_image_embeds]
-
-            if len(ip_adapter_image_embeds) != self.transformer.encoder_hid_proj.num_ip_adapters:
-                raise ValueError(
-                    f"`ip_adapter_image_embeds` must have same length as the number of IP Adapters. Got {len(ip_adapter_image_embeds)} image embeds and {self.transformer.encoder_hid_proj.num_ip_adapters} IP Adapters."
-                )
-
             for single_image_embeds in ip_adapter_image_embeds:
                 image_embeds.append(single_image_embeds)
 
         ip_adapter_image_embeds = []
-        for single_image_embeds in image_embeds:
+        for i, single_image_embeds in enumerate(image_embeds):
             single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0)
             single_image_embeds = single_image_embeds.to(device=device)
             ip_adapter_image_embeds.append(single_image_embeds)

src/diffusers/pipelines/flux/pipeline_flux_inpaint.py
Lines changed: 7 additions & 12 deletions

@@ -432,28 +432,23 @@ def prepare_ip_adapter_image_embeds(
             if not isinstance(ip_adapter_image, list):
                 ip_adapter_image = [ip_adapter_image]
 
-            if len(ip_adapter_image) != self.transformer.encoder_hid_proj.num_ip_adapters:
+            if len(ip_adapter_image) != len(self.transformer.encoder_hid_proj.image_projection_layers):
                 raise ValueError(
-                    f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {self.transformer.encoder_hid_proj.num_ip_adapters} IP Adapters."
+                    f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.transformer.encoder_hid_proj.image_projection_layers)} IP Adapters."
                 )
 
-            for single_ip_adapter_image in ip_adapter_image:
+            for single_ip_adapter_image, image_proj_layer in zip(
+                ip_adapter_image, self.transformer.encoder_hid_proj.image_projection_layers
+            ):
                 single_image_embeds = self.encode_image(single_ip_adapter_image, device, 1)
+
                 image_embeds.append(single_image_embeds[None, :])
         else:
-            if not isinstance(ip_adapter_image_embeds, list):
-                ip_adapter_image_embeds = [ip_adapter_image_embeds]
-
-            if len(ip_adapter_image_embeds) != self.transformer.encoder_hid_proj.num_ip_adapters:
-                raise ValueError(
-                    f"`ip_adapter_image_embeds` must have same length as the number of IP Adapters. Got {len(ip_adapter_image_embeds)} image embeds and {self.transformer.encoder_hid_proj.num_ip_adapters} IP Adapters."
-                )
-
             for single_image_embeds in ip_adapter_image_embeds:
                 image_embeds.append(single_image_embeds)
 
         ip_adapter_image_embeds = []
-        for single_image_embeds in image_embeds:
+        for i, single_image_embeds in enumerate(image_embeds):
             single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0)
             single_image_embeds = single_image_embeds.to(device=device)
             ip_adapter_image_embeds.append(single_image_embeds)
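
The tail of each hunk (unchanged apart from switching to enumerate) repeats every per-adapter embedding num_images_per_prompt times along the batch dimension. A small sketch of that step in isolation, assuming an arbitrary placeholder embedding shape:

import torch

num_images_per_prompt = 2
image_embeds = [torch.randn(1, 4, 8)]  # placeholder per-adapter embedding

ip_adapter_image_embeds = []
for i, single_image_embeds in enumerate(image_embeds):
    # duplicate along the batch dimension, one copy per requested image
    single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0)
    single_image_embeds = single_image_embeds.to(device="cpu")
    ip_adapter_image_embeds.append(single_image_embeds)

print(ip_adapter_image_embeds[0].shape)  # torch.Size([2, 4, 8])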
