@@ -266,17 +266,17 @@ def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.devic
             )

         sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
+        log_sigmas = np.log(sigmas)
         if self.config.use_karras_sigmas:
-            log_sigmas = np.log(sigmas)
             sigmas = np.flip(sigmas).copy()
             sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
             timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round()
             sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32)
         elif self.config.use_exponential_sigmas:
-            sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
+            sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
             timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
         elif self.config.use_beta_sigmas:
-            sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
+            sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
             timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
         else:
             sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
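The point of hoisting log_sigmas = np.log(sigmas) above the branch is that _sigma_to_t(sigma, log_sigmas) is called in all three conversion branches, while log_sigmas was previously assigned only inside the Karras one, leaving it undefined on the exponential and beta paths so they failed at runtime. A minimal sketch of the idea, with sigma_to_t written as a simplified stand-in for the scheduler's _sigma_to_t (the real method does its own piecewise-linear interpolation in log-sigma space):

import numpy as np

def sigma_to_t(sigma: float, log_sigmas: np.ndarray) -> float:
    # log_sigmas increases with the training timestep, so interpolating the index
    # against it recovers the (fractional) timestep whose sigma matches `sigma`
    return float(np.interp(np.log(sigma), log_sigmas, np.arange(len(log_sigmas))))

alphas_cumprod = np.linspace(0.999, 0.01, 1000)           # hypothetical training schedule
sigmas = ((1 - alphas_cumprod) / alphas_cumprod) ** 0.5   # same formula as the scheduler
log_sigmas = np.log(sigmas)                               # computed once, before any branch
print(sigma_to_t(sigmas[500], log_sigmas))                # 500.0

Whichever schedule a branch builds, it only needs log_sigmas to map its sigmas back to timesteps, which is why the assignment belongs before the if/elif chain.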
@@ -408,7 +408,7 @@ def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps:
         sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
         sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()

-        sigmas = torch.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps).exp()
+        sigmas = np.exp(np.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps))
         return sigmas

     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_beta
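For the exponential schedule the change is just staying in NumPy: np.exp(np.linspace(...)) yields the same geometrically spaced sigmas as torch.linspace(...).exp(), but as an np.ndarray, matching what the other branches return and what set_timesteps then feeds to NumPy code. A self-contained sketch, with convert_to_exponential as a hypothetical free-function version of the method:

import math
import numpy as np

def convert_to_exponential(sigma_min: float, sigma_max: float, num_inference_steps: int) -> np.ndarray:
    # evenly spaced in log-sigma from sigma_max down to sigma_min, i.e. a geometric sequence
    return np.exp(np.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps))

print(convert_to_exponential(0.1, 10.0, 5))  # ≈ [10.0, 3.16, 1.0, 0.316, 0.1]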
@@ -432,7 +432,7 @@ def _convert_to_beta(
         sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
         sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()

-        sigmas = torch.Tensor(
+        sigmas = np.array(
             [
                 sigma_min + (ppf * (sigma_max - sigma_min))
                 for ppf in [
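The beta branch gets the same treatment: the comprehension of ppf-scaled values is collected into np.array instead of torch.Tensor. Below is a vectorized sketch of the schedule it builds, assuming, as in the EulerDiscreteScheduler method this code is copied from, that the ppf values are the beta distribution's inverse CDF evaluated on evenly spaced quantiles; convert_to_beta and its alpha/beta defaults are illustrative, not taken from this hunk:

import numpy as np
import scipy.stats

def convert_to_beta(sigma_min: float, sigma_max: float, num_inference_steps: int,
                    alpha: float = 0.6, beta: float = 0.6) -> np.ndarray:
    # evenly spaced quantiles mapped through the beta inverse CDF (ppf),
    # then rescaled from [0, 1] into [sigma_min, sigma_max]
    quantiles = 1 - np.linspace(0, 1, num_inference_steps)
    ppfs = scipy.stats.beta.ppf(quantiles, alpha, beta)
    return sigma_min + ppfs * (sigma_max - sigma_min)

print(convert_to_beta(0.1, 10.0, 5))  # 5 sigmas decreasing from 10.0 to 0.1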