Skip to content

Commit 9bfddfe

Browse files
authored
Apply suggestions from code review
1 parent c1084b8 commit 9bfddfe

File tree

2 files changed

+0
-47
lines changed

2 files changed

+0
-47
lines changed

src/diffusers/schedulers/scheduling_ddim.py

Lines changed: 0 additions & 8 deletions
Original file line number · Diff line number · Diff line change
@@ -349,8 +349,6 @@ def step(
349349
generator=None,
350350
variance_noise: Optional[torch.Tensor] = None,
351351
return_dict: bool = True,
352-
_model_output_uncond: Optional[torch.Tensor] = None,
353-
_use_cfgpp: bool = True,
354352
) -> Union[DDIMSchedulerOutput, Tuple]:
355353
"""
356354
Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
@@ -388,11 +386,6 @@ def step(
388386
raise ValueError(
389387
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
390388
)
391-
392-
if _use_cfgpp and self.config.prediction_type != "epsilon":
393-
raise ValueError(
394-
f"CFG++ is only supported for prediction type `epsilon`, but got {self.config.prediction_type}."
395-
)
396389

397390
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
398391
# Ideally, read DDIM paper in-detail understanding
@@ -418,7 +411,6 @@ def step(
418411
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
419412
if self.config.prediction_type == "epsilon":
420413
pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
421-
pred_epsilon = model_output if not _use_cfgpp else _model_output_uncond
422414
elif self.config.prediction_type == "sample":
423415
pred_original_sample = model_output
424416
pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5)

src/diffusers/schedulers/scheduling_euler_discrete.py

Lines changed: 0 additions & 39 deletions
Original file line number · Diff line number · Diff line change
@@ -584,8 +584,6 @@ def step(
584584
s_noise: float = 1.0,
585585
generator: Optional[torch.Generator] = None,
586586
return_dict: bool = True,
587-
_model_output_uncond: Optional[torch.Tensor] = None,
588-
_use_cfgpp: bool = False,
589587
) -> Union[EulerDiscreteSchedulerOutput, Tuple]:
590588
"""
591589
Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
@@ -629,11 +627,6 @@ def step(
629627
"The `scale_model_input` function should be called before `step` to ensure correct denoising. "
630628
"See `StableDiffusionPipeline` for a usage example."
631629
)
632-
633-
if _use_cfgpp and self.config.prediction_type != "epsilon":
634-
raise ValueError(
635-
f"CFG++ is only supported for prediction type `epsilon`, but got {self.config.prediction_type}."
636-
)
637630

638631
if self.step_index is None:
639632
self._init_step_index(timestep)
@@ -675,38 +668,6 @@ def step(
675668
dt = self.sigmas[self.step_index + 1] - sigma_hat
676669

677670
prev_sample = sample + derivative * dt
678-
if _use_cfgpp:
679-
prev_sample = prev_sample + (_model_output_uncond - model_output) * self.sigmas[self.step_index + 1]
680-
681-
# denoised = sample - model_output * sigmas[i]
682-
# d = (sample - denoised) / sigmas[i]
683-
# new_sample = denoised + d * sigmas[i + 1]
684-
685-
# new_sample = denoised + (sample - denoised) * sigmas[i + 1] / sigmas[i]
686-
# new_sample = sample - model_output * sigmas[i] + model_output * sigmas[i + 1]
687-
# new_sample = sample + model_output * (sigmas[i + 1] - sigmas[i])
688-
# new_sample = sample - model_output * sigmas[i] + model_output * sigmas[i + 1] --- (1)
689-
690-
# CFG++ =====
691-
# denoised = sample - model_output * sigmas[i]
692-
# uncond_denoised = sample - model_output_uncond * sigmas[i]
693-
# d = (sample - uncond_denoised) / sigmas[i]
694-
# new_sample = denoised + d * sigmas[i + 1]
695-
696-
# new_sample = denoised + (sample - uncond_denoised) * sigmas[i + 1] / sigmas[i]
697-
# new_sample = sample - model_output * sigmas[i] + model_output_uncond * sigmas[i + 1] --- (2)
698-
699-
# To go from (1) to (2):
700-
# new_sample_2 = new_sample_1 - model_output * sigmas[i + 1] + model_output_uncond * sigmas[i + 1]
701-
# new_sample_2 = new_sample_1 + (model_output_uncond - model_output) * sigmas[i + 1]
702-
# new_sample_2 = new_sample_1 + diff * sigmas[i + 1]
703-
704-
# diff = model_output_uncond - model_output
705-
# diff = model_output_uncond - (model_output_uncond + g * (model_output_cond - model_output_uncond))
706-
# diff = model_output_uncond - (g * model_output_cond + (1 - g) * model_output_uncond)
707-
# diff = model_output_uncond - g * model_output_cond + (g - 1) * model_output_uncond
708-
# diff = g * (model_output_uncond - model_output_cond)
709-
710671
# Cast sample back to model compatible dtype
711672
prev_sample = prev_sample.to(model_output.dtype)
712673

0 commit comments

Comments (0)