@@ -266,18 +266,22 @@ def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.devic
             )
 
         sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
+        log_sigmas = np.log(sigmas)
         if self.config.use_karras_sigmas:
-            log_sigmas = np.log(sigmas)
             sigmas = np.flip(sigmas).copy()
             sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
             timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round()
             sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32)
         elif self.config.use_exponential_sigmas:
-            sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
+            sigmas = np.flip(sigmas).copy()
+            sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
             timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
+            sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32)
         elif self.config.use_beta_sigmas:
-            sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
+            sigmas = np.flip(sigmas).copy()
+            sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
             timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
+            sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32)
         else:
             sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
             sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5
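
For orientation, here is a minimal standalone sketch (a paraphrase of the hunk, not code from the PR) of the pattern all three sigma schedules now share: log_sigmas is computed once before the branches, the ascending training sigmas are flipped to descending order, the conversion receives the local num_inference_steps argument rather than self.num_inference_steps, timesteps are recovered via _sigma_to_t, and the last sigma is repeated so the array has num_inference_steps + 1 entries. The function and parameter names below are illustrative, not part of the scheduler API:

import numpy as np

def build_schedule(training_sigmas, num_inference_steps, convert_fn, sigma_to_t):
    # training_sigmas: ascending sigmas derived from alphas_cumprod
    log_sigmas = np.log(training_sigmas)
    sigmas = np.flip(training_sigmas).copy()  # descending order expected by the converters
    sigmas = convert_fn(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
    timesteps = np.array([sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
    sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32)  # repeat final sigma
    return sigmas, timesteps
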
@@ -408,7 +412,7 @@ def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps:
         sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
         sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
 
-        sigmas = torch.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps).exp()
+        sigmas = np.exp(np.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps))
         return sigmas
 
     # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_beta
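
The exponential branch now also stays in numpy end to end: np.exp(np.linspace(...)) gives the same log-uniform (geometric) spacing from sigma_max down to sigma_min as the old torch.linspace(...).exp(), but returns an ndarray, which matches the numpy-based _sigma_to_t and np.concatenate calls in set_timesteps above. A small illustrative check (example values are arbitrary):

import math
import numpy as np
import torch

sigma_max, sigma_min, n = 14.6, 0.03, 10  # arbitrary example values

old = torch.linspace(math.log(sigma_max), math.log(sigma_min), n).exp().numpy()
new = np.exp(np.linspace(math.log(sigma_max), math.log(sigma_min), n))

# Same geometric interpolation; differences are only float32 vs. float64 rounding.
assert np.allclose(old, new, rtol=1e-5)
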
@@ -432,7 +436,7 @@ def _convert_to_beta(
         sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
         sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
 
-        sigmas = torch.Tensor(
+        sigmas = np.array(
             [
                 sigma_min + (ppf * (sigma_max - sigma_min))
                 for ppf in [
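
The beta branch is truncated above; conceptually it maps Beta-distribution percent-point-function values onto the [sigma_min, sigma_max] range. A hedged sketch of that idea, assuming scipy.stats.beta.ppf and illustrative alpha/beta shape parameters (a reconstruction, not the file's exact code):

import numpy as np
import scipy.stats

def beta_sigmas(sigma_min, sigma_max, num_inference_steps, alpha=0.6, beta=0.6):
    # Evaluate the Beta(alpha, beta) percent-point function on a grid running from 1 to 0,
    # so the resulting sigmas run from sigma_max down to sigma_min.
    ppfs = scipy.stats.beta.ppf(1 - np.linspace(0, 1, num_inference_steps), alpha, beta)
    return np.array([sigma_min + ppf * (sigma_max - sigma_min) for ppf in ppfs])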