Skip to content

Commit 4f8c133

Browse files
committed
make fix-copies
1 parent 829545d commit 4f8c133

File tree

3 files changed

+103
-28
lines changed

3 files changed

+103
-28
lines changed

src/diffusers/schedulers/scheduling_cosine_dpmsolver_multistep.py

Lines changed: 45 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -87,6 +87,7 @@ def __init__(
8787
lower_order_final: bool = True,
8888
euler_at_final: bool = False,
8989
final_sigmas_type: Optional[str] = "zero", # "zero", "sigma_min"
90+
use_flow_sigmas: bool = False,
9091
):
9192
if solver_type not in ["midpoint", "heun"]:
9293
if solver_type in ["logrho", "bh1", "bh2"]:
@@ -152,23 +153,19 @@ def precondition_noise(self, sigma):
152153
if not isinstance(sigma, torch.Tensor):
153154
sigma = torch.tensor([sigma])
154155

155-
return sigma.atan() / math.pi * 2
156+
if self.config.use_flow_sigmas:
157+
c_noise = sigma / (sigma + 1)
158+
else:
159+
c_noise = sigma.atan() / math.pi * 2
160+
161+
return c_noise
156162

157163
# Copied from diffusers.schedulers.scheduling_edm_euler.EDMEulerScheduler.precondition_outputs
158164
def precondition_outputs(self, sample, model_output, sigma):
159-
sigma_data = self.config.sigma_data
160-
c_skip = sigma_data**2 / (sigma**2 + sigma_data**2)
161-
162-
if self.config.prediction_type == "epsilon":
163-
c_out = sigma * sigma_data / (sigma**2 + sigma_data**2) ** 0.5
164-
elif self.config.prediction_type == "v_prediction":
165-
c_out = -sigma * sigma_data / (sigma**2 + sigma_data**2) ** 0.5
165+
if self.config.use_flow_sigmas:
166+
return self._precondition_outputs_flow(sample, model_output, sigma)
166167
else:
167-
raise ValueError(f"Prediction type {self.config.prediction_type} is not supported.")
168-
169-
denoised = c_skip * sample + c_out * model_output
170-
171-
return denoised
168+
return self._precondition_outputs_edm(sample, model_output, sigma)
172169

173170
# Copied from diffusers.schedulers.scheduling_edm_euler.EDMEulerScheduler.scale_model_input
174171
def scale_model_input(self, sample: torch.Tensor, timestep: Union[float, torch.Tensor]) -> torch.Tensor:
@@ -570,8 +567,42 @@ def add_noise(
570567

571568
# Copied from diffusers.schedulers.scheduling_edm_euler.EDMEulerScheduler._get_conditioning_c_in
572569
def _get_conditioning_c_in(self, sigma):
573-
c_in = 1 / ((sigma**2 + self.config.sigma_data**2) ** 0.5)
570+
if self.config.use_flow_sigmas:
571+
t = sigma / (sigma + 1)
572+
c_in = 1.0 - t
573+
else:
574+
c_in = 1 / ((sigma**2 + self.config.sigma_data**2) ** 0.5)
574575
return c_in
575576

577+
# Copied from diffusers.schedulers.scheduling_edm_euler.EDMEulerScheduler._precondition_outputs_flow
578+
def _precondition_outputs_flow(self, sample, model_output, sigma):
579+
t = sigma / (sigma + 1)
580+
c_skip = 1.0 - t
581+
582+
if self.config.prediction_type == "epsilon":
583+
c_out = -t
584+
elif self.config.prediction_type == "v_prediction":
585+
c_out = t
586+
else:
587+
raise ValueError(f"Prediction type {self.config.prediction_type} is not supported.")
588+
589+
denoised = c_skip * sample + c_out * model_output
590+
return denoised
591+
592+
# Copied from diffusers.schedulers.scheduling_edm_euler.EDMEulerScheduler._precondition_outputs_edm
593+
def _precondition_outputs_edm(self, sample, model_output, sigma):
594+
sigma_data = self.config.sigma_data
595+
c_skip = sigma_data**2 / (sigma**2 + sigma_data**2)
596+
597+
if self.config.prediction_type == "epsilon":
598+
c_out = sigma * sigma_data / (sigma**2 + sigma_data**2) ** 0.5
599+
elif self.config.prediction_type == "v_prediction":
600+
c_out = -sigma * sigma_data / (sigma**2 + sigma_data**2) ** 0.5
601+
else:
602+
raise ValueError(f"Prediction type {self.config.prediction_type} is not supported.")
603+
604+
denoised = c_skip * sample + c_out * model_output
605+
return denoised
606+
576607
def __len__(self):
577608
return self.config.num_train_timesteps

src/diffusers/schedulers/scheduling_edm_dpmsolver_multistep.py

Lines changed: 43 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -107,6 +107,7 @@ def __init__(
107107
lower_order_final: bool = True,
108108
euler_at_final: bool = False,
109109
final_sigmas_type: Optional[str] = "zero", # "zero", "sigma_min"
110+
use_flow_sigmas: bool = False,
110111
):
111112
# settings for DPM-Solver
112113
if algorithm_type not in ["dpmsolver++", "sde-dpmsolver++"]:
@@ -185,25 +186,19 @@ def precondition_noise(self, sigma):
185186
if not isinstance(sigma, torch.Tensor):
186187
sigma = torch.tensor([sigma])
187188

188-
c_noise = 0.25 * torch.log(sigma)
189+
if self.config.use_flow_sigmas:
190+
c_noise = sigma / (sigma + 1)
191+
else:
192+
c_noise = 0.25 * torch.log(sigma)
189193

190194
return c_noise
191195

192196
# Copied from diffusers.schedulers.scheduling_edm_euler.EDMEulerScheduler.precondition_outputs
193197
def precondition_outputs(self, sample, model_output, sigma):
194-
sigma_data = self.config.sigma_data
195-
c_skip = sigma_data**2 / (sigma**2 + sigma_data**2)
196-
197-
if self.config.prediction_type == "epsilon":
198-
c_out = sigma * sigma_data / (sigma**2 + sigma_data**2) ** 0.5
199-
elif self.config.prediction_type == "v_prediction":
200-
c_out = -sigma * sigma_data / (sigma**2 + sigma_data**2) ** 0.5
198+
if self.config.use_flow_sigmas:
199+
return self._precondition_outputs_flow(sample, model_output, sigma)
201200
else:
202-
raise ValueError(f"Prediction type {self.config.prediction_type} is not supported.")
203-
204-
denoised = c_skip * sample + c_out * model_output
205-
206-
return denoised
201+
return self._precondition_outputs_edm(sample, model_output, sigma)
207202

208203
# Copied from diffusers.schedulers.scheduling_edm_euler.EDMEulerScheduler.scale_model_input
209204
def scale_model_input(self, sample: torch.Tensor, timestep: Union[float, torch.Tensor]) -> torch.Tensor:
@@ -705,8 +700,42 @@ def add_noise(
705700

706701
# Copied from diffusers.schedulers.scheduling_edm_euler.EDMEulerScheduler._get_conditioning_c_in
707702
def _get_conditioning_c_in(self, sigma):
708-
c_in = 1 / ((sigma**2 + self.config.sigma_data**2) ** 0.5)
703+
if self.config.use_flow_sigmas:
704+
t = sigma / (sigma + 1)
705+
c_in = 1.0 - t
706+
else:
707+
c_in = 1 / ((sigma**2 + self.config.sigma_data**2) ** 0.5)
709708
return c_in
710709

710+
# Copied from diffusers.schedulers.scheduling_edm_euler.EDMEulerScheduler._precondition_outputs_flow
711+
def _precondition_outputs_flow(self, sample, model_output, sigma):
712+
t = sigma / (sigma + 1)
713+
c_skip = 1.0 - t
714+
715+
if self.config.prediction_type == "epsilon":
716+
c_out = -t
717+
elif self.config.prediction_type == "v_prediction":
718+
c_out = t
719+
else:
720+
raise ValueError(f"Prediction type {self.config.prediction_type} is not supported.")
721+
722+
denoised = c_skip * sample + c_out * model_output
723+
return denoised
724+
725+
# Copied from diffusers.schedulers.scheduling_edm_euler.EDMEulerScheduler._precondition_outputs_edm
726+
def _precondition_outputs_edm(self, sample, model_output, sigma):
727+
sigma_data = self.config.sigma_data
728+
c_skip = sigma_data**2 / (sigma**2 + sigma_data**2)
729+
730+
if self.config.prediction_type == "epsilon":
731+
c_out = sigma * sigma_data / (sigma**2 + sigma_data**2) ** 0.5
732+
elif self.config.prediction_type == "v_prediction":
733+
c_out = -sigma * sigma_data / (sigma**2 + sigma_data**2) ** 0.5
734+
else:
735+
raise ValueError(f"Prediction type {self.config.prediction_type} is not supported.")
736+
737+
denoised = c_skip * sample + c_out * model_output
738+
return denoised
739+
711740
def __len__(self):
712741
return self.config.num_train_timesteps

src/diffusers/utils/dummy_torch_and_transformers_objects.py

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -422,6 +422,21 @@ def from_pretrained(cls, *args, **kwargs):
422422
requires_backends(cls, ["torch", "transformers"])
423423

424424

425+
class Cosmos2VideoToWorldPipeline(metaclass=DummyObject):
426+
_backends = ["torch", "transformers"]
427+
428+
def __init__(self, *args, **kwargs):
429+
requires_backends(self, ["torch", "transformers"])
430+
431+
@classmethod
432+
def from_config(cls, *args, **kwargs):
433+
requires_backends(cls, ["torch", "transformers"])
434+
435+
@classmethod
436+
def from_pretrained(cls, *args, **kwargs):
437+
requires_backends(cls, ["torch", "transformers"])
438+
439+
425440
class CosmosTextToWorldPipeline(metaclass=DummyObject):
426441
_backends = ["torch", "transformers"]
427442

0 commit comments

Comments (0)