@@ -774,8 +774,6 @@ def __call__(
         # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
         # corresponds to doing no classifier free guidance.
         do_classifier_free_guidance = guidance_scale > 1.0 and image_guidance_scale >= 1.0
-        # check if scheduler is in sigmas space
-        scheduler_is_in_sigma_space = hasattr(self.scheduler, "sigmas")

         # 3. Encode input prompt
         text_encoder_lora_scale = (
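For reference, the flag deleted in this hunk was a simple duck-typing check: any scheduler exposing a `sigmas` attribute (Euler/Heun/DPM-style schedulers with Karras-like sigma schedules) was treated as operating in sigma space. A minimal standalone sketch of that check, assuming the stock `DDIMScheduler` and `EulerDiscreteScheduler` classes with their default configs:

```python
# Illustrative only: reproduces the removed hasattr() check outside the pipeline.
from diffusers import DDIMScheduler, EulerDiscreteScheduler

for scheduler_cls in (DDIMScheduler, EulerDiscreteScheduler):
    scheduler = scheduler_cls()  # default config is enough for this check
    scheduler_is_in_sigma_space = hasattr(scheduler, "sigmas")
    print(f"{scheduler_cls.__name__}: sigma space = {scheduler_is_in_sigma_space}")
```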
@@ -906,15 +904,6 @@ def __call__(
                     return_dict=False,
                 )[0]

-                # Hack:
-                # For karras style schedulers the model does classifer free guidance using the
-                # predicted_original_sample instead of the noise_pred. So we need to compute the
-                # predicted_original_sample here if we are using a karras style scheduler.
-                if scheduler_is_in_sigma_space:
-                    step_index = (self.scheduler.timesteps == t).nonzero()[0].item()
-                    sigma = self.scheduler.sigmas[step_index]
-                    noise_pred = latent_model_input - sigma * noise_pred
-
                 # perform guidance
                 if do_classifier_free_guidance:
                     noise_pred_text, noise_pred_image, noise_pred_uncond = noise_pred.chunk(3)
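The block removed above converted the UNet's epsilon prediction into a predicted original sample before guidance, using the relation x_t = x_0 + sigma * eps that sigma-space schedulers assume, so classifier-free guidance was effectively applied in x0 space; with it gone, guidance acts directly on the epsilon prediction for every scheduler. A minimal sketch of the forward conversion, with stand-in tensors and a hypothetical helper name (not diffusers API):

```python
# Sketch of the eps -> x0 conversion the removed block performed (assumed names).
import torch

def eps_to_pred_original_sample(latent_model_input: torch.Tensor,
                                noise_pred: torch.Tensor,
                                sigma: float) -> torch.Tensor:
    # Sigma-space schedulers model x_t = x_0 + sigma * eps, so an epsilon
    # prediction maps to a predicted original sample as x0_hat = x_t - sigma * eps_hat.
    return latent_model_input - sigma * noise_pred

# Toy usage: a batch of 3 stands in for the text / image / unconditional branches.
x_t = torch.randn(3, 4, 64, 64)
eps_hat = torch.randn(3, 4, 64, 64)
x0_hat = eps_to_pred_original_sample(x_t, eps_hat, sigma=14.6)
```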
@@ -928,15 +917,6 @@ def __call__(
                     # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
                     noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)

-                # Hack:
-                # For karras style schedulers the model does classifer free guidance using the
-                # predicted_original_sample instead of the noise_pred. But the scheduler.step function
-                # expects the noise_pred and computes the predicted_original_sample internally. So we
-                # need to overwrite the noise_pred here such that the value of the computed
-                # predicted_original_sample is correct.
-                if scheduler_is_in_sigma_space:
-                    noise_pred = (noise_pred - latents) / (-sigma)
-
                 # compute the previous noisy sample x_t -> x_t-1
                 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]

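The second removed block was the inverse step: after guidance had been applied to the x0-space predictions, the result was mapped back to epsilon so that `scheduler.step`, which expects a noise prediction and derives the predicted original sample internally, would recover the intended x0. A sketch of that inverse mapping with a toy round-trip check; the helper name and tensors are again illustrative, not diffusers API:

```python
# Sketch of the x0 -> eps back-conversion the removed block performed (assumed names).
import torch

def pred_original_sample_to_eps(pred_x0: torch.Tensor,
                                latents: torch.Tensor,
                                sigma: float) -> torch.Tensor:
    # Invert x0_hat = x_t - sigma * eps_hat  =>  eps_hat = (x_t - x0_hat) / sigma,
    # written in the pipeline's form: (pred_x0 - latents) / (-sigma).
    return (pred_x0 - latents) / (-sigma)

# Toy round trip: eps -> x0 -> eps recovers the original prediction.
latents = torch.randn(1, 4, 64, 64)
eps_hat = torch.randn(1, 4, 64, 64)
sigma = 7.0
x0_hat = latents - sigma * eps_hat
assert torch.allclose(pred_original_sample_to_eps(x0_hat, latents, sigma), eps_hat, atol=1e-5)
```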