Commit da6fd60

fix

1 parent ff4bff9 commit da6fd60
File tree

1 file changed (+3, -6)


src/diffusers/pipelines/lumina2/pipeline_lumina2.py

Lines changed: 3 additions & 6 deletions
@@ -637,11 +637,6 @@ def __call__(
 
         device = self._execution_device
 
-        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
-        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
-        # corresponds to doing no classifier free guidance.
-        do_classifier_free_guidance = guidance_scale > 1.0
-
         # 3. Encode input prompt
         (
             prompt_embeds,
@@ -650,7 +645,7 @@ def __call__(
             negative_prompt_attention_mask,
         ) = self.encode_prompt(
             prompt,
-            do_classifier_free_guidance,
+            self.do_classifier_free_guidance,
             negative_prompt=negative_prompt,
             num_images_per_prompt=num_images_per_prompt,
             device=device,
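
The first two hunks drop the locally computed `do_classifier_free_guidance` flag and read it from the pipeline instead via `self.do_classifier_free_guidance`. Below is a minimal sketch of the property pattern commonly used across diffusers pipelines; the class name and the `_guidance_scale` attribute are illustrative assumptions, not lines copied from pipeline_lumina2.py.

class _PipelineSketch:
    """Illustrative stand-in for a pipeline exposing the CFG property."""

    def __init__(self, guidance_scale: float):
        # In real pipelines, `__call__` typically stores the user-passed
        # value, e.g. `self._guidance_scale = guidance_scale`.
        self._guidance_scale = guidance_scale

    @property
    def do_classifier_free_guidance(self) -> bool:
        # Equivalent to the deleted local check `guidance_scale > 1.0`.
        return self._guidance_scale > 1


assert _PipelineSketch(4.0).do_classifier_free_guidance is True
assert _PipelineSketch(1.0).do_classifier_free_guidance is False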
@@ -730,6 +725,8 @@ def __call__(
                         cond_norm = torch.norm(noise_pred_cond, dim=-1, keepdim=True)
                         noise_norm = torch.norm(noise_pred, dim=-1, keepdim=True)
                         noise_pred = noise_pred * (cond_norm / noise_norm)
+                else:
+                    noise_pred = noise_pred_cond
 
                 # compute the previous noisy sample x_t -> x_t-1
                 latents_dtype = latents.dtype
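
The third hunk adds an `else` branch so that `noise_pred` is always assigned before the scheduler step that follows ("compute the previous noisy sample"): when the guidance branch is not taken, the conditional prediction is used directly. A self-contained sketch of the resulting logic is below; the function name `guided_noise_pred` and the `apply_guidance` / `cfg_normalization` flags are illustrative assumptions, not code from the pipeline.

from typing import Optional

import torch


def guided_noise_pred(
    noise_pred_cond: torch.Tensor,
    noise_pred_uncond: Optional[torch.Tensor],
    guidance_scale: float,
    apply_guidance: bool,
    cfg_normalization: bool = True,
) -> torch.Tensor:
    if apply_guidance and noise_pred_uncond is not None:
        # Classifier-free guidance: move away from the unconditional prediction.
        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
        if cfg_normalization:
            # Rescale the guided prediction to the norm of the conditional one,
            # mirroring the cond_norm / noise_norm lines in the hunk above.
            cond_norm = torch.norm(noise_pred_cond, dim=-1, keepdim=True)
            noise_norm = torch.norm(noise_pred, dim=-1, keepdim=True)
            noise_pred = noise_pred * (cond_norm / noise_norm)
    else:
        # The behaviour this commit adds: fall back to the conditional
        # prediction instead of leaving `noise_pred` unassigned.
        noise_pred = noise_pred_cond
    return noise_pred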
