Skip to content

Commit 637e230

Browse files
hlky and sayakpaul
authored
Fix beta and exponential sigmas + add tests (huggingface#9954)
* Fix beta and exponential sigmas + add tests --------- Co-authored-by: Sayak Paul <[email protected]>
1 parent 99c0483 commit 637e230

24 files changed

+157
-51
lines changed

src/diffusers/schedulers/scheduling_deis_multistep.py

Lines changed: 9 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -266,18 +266,22 @@ def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.devic
266266
)
267267

268268
sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
269+
log_sigmas = np.log(sigmas)
269270
if self.config.use_karras_sigmas:
270-
log_sigmas = np.log(sigmas)
271271
sigmas = np.flip(sigmas).copy()
272272
sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
273273
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round()
274274
sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32)
275275
elif self.config.use_exponential_sigmas:
276-
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
276+
sigmas = np.flip(sigmas).copy()
277+
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
277278
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
279+
sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32)
278280
elif self.config.use_beta_sigmas:
279-
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
281+
sigmas = np.flip(sigmas).copy()
282+
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
280283
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
284+
sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32)
281285
else:
282286
sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
283287
sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5
@@ -408,7 +412,7 @@ def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps:
408412
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
409413
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
410414

411-
sigmas = torch.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps).exp()
415+
sigmas = np.exp(np.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps))
412416
return sigmas
413417

414418
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_beta
@@ -432,7 +436,7 @@ def _convert_to_beta(
432436
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
433437
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
434438

435-
sigmas = torch.Tensor(
439+
sigmas = np.array(
436440
[
437441
sigma_min + (ppf * (sigma_max - sigma_min))
438442
for ppf in [

src/diffusers/schedulers/scheduling_dpmsolver_multistep.py

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -400,10 +400,12 @@ def set_timesteps(
400400
sigmas = np.exp(lambdas)
401401
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round()
402402
elif self.config.use_exponential_sigmas:
403-
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
403+
sigmas = np.flip(sigmas).copy()
404+
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
404405
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
405406
elif self.config.use_beta_sigmas:
406-
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
407+
sigmas = np.flip(sigmas).copy()
408+
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
407409
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
408410
else:
409411
sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
@@ -556,7 +558,7 @@ def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps:
556558
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
557559
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
558560

559-
sigmas = torch.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps).exp()
561+
sigmas = np.exp(np.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps))
560562
return sigmas
561563

562564
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_beta
@@ -580,7 +582,7 @@ def _convert_to_beta(
580582
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
581583
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
582584

583-
sigmas = torch.Tensor(
585+
sigmas = np.array(
584586
[
585587
sigma_min + (ppf * (sigma_max - sigma_min))
586588
for ppf in [

src/diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -287,10 +287,10 @@ def set_timesteps(self, num_inference_steps: int = None, device: Union[str, torc
287287
timesteps = timesteps.copy().astype(np.int64)
288288
sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32)
289289
elif self.config.use_exponential_sigmas:
290-
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
290+
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
291291
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
292292
elif self.config.use_beta_sigmas:
293-
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
293+
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
294294
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
295295
else:
296296
sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
@@ -429,7 +429,7 @@ def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps:
429429
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
430430
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
431431

432-
sigmas = torch.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps).exp()
432+
sigmas = np.exp(np.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps))
433433
return sigmas
434434

435435
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_beta
@@ -453,7 +453,7 @@ def _convert_to_beta(
453453
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
454454
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
455455

456-
sigmas = torch.Tensor(
456+
sigmas = np.array(
457457
[
458458
sigma_min + (ppf * (sigma_max - sigma_min))
459459
for ppf in [

src/diffusers/schedulers/scheduling_dpmsolver_sde.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -380,10 +380,10 @@ def set_timesteps(
380380
sigmas = self._convert_to_karras(in_sigmas=sigmas)
381381
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
382382
elif self.config.use_exponential_sigmas:
383-
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
383+
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
384384
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
385385
elif self.config.use_beta_sigmas:
386-
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
386+
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
387387
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
388388

389389
second_order_timesteps = self._second_order_timesteps(sigmas, log_sigmas)
@@ -484,7 +484,7 @@ def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps:
484484
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
485485
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
486486

487-
sigmas = torch.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps).exp()
487+
sigmas = np.exp(np.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps))
488488
return sigmas
489489

490490
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_beta
@@ -508,7 +508,7 @@ def _convert_to_beta(
508508
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
509509
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
510510

511-
sigmas = torch.Tensor(
511+
sigmas = np.array(
512512
[
513513
sigma_min + (ppf * (sigma_max - sigma_min))
514514
for ppf in [

src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -339,16 +339,18 @@ def set_timesteps(
339339
)
340340

341341
sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
342+
log_sigmas = np.log(sigmas)
342343
if self.config.use_karras_sigmas:
343-
log_sigmas = np.log(sigmas)
344344
sigmas = np.flip(sigmas).copy()
345345
sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
346346
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round()
347347
elif self.config.use_exponential_sigmas:
348-
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
348+
sigmas = np.flip(sigmas).copy()
349+
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
349350
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
350351
elif self.config.use_beta_sigmas:
351-
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
352+
sigmas = np.flip(sigmas).copy()
353+
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
352354
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
353355
else:
354356
sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
@@ -498,7 +500,7 @@ def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps:
498500
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
499501
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
500502

501-
sigmas = torch.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps).exp()
503+
sigmas = np.exp(np.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps))
502504
return sigmas
503505

504506
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_beta
@@ -522,7 +524,7 @@ def _convert_to_beta(
522524
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
523525
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
524526

525-
sigmas = torch.Tensor(
527+
sigmas = np.array(
526528
[
527529
sigma_min + (ppf * (sigma_max - sigma_min))
528530
for ppf in [

src/diffusers/schedulers/scheduling_euler_discrete.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -419,11 +419,11 @@ def set_timesteps(
419419
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
420420

421421
elif self.config.use_exponential_sigmas:
422-
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
422+
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
423423
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
424424

425425
elif self.config.use_beta_sigmas:
426-
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
426+
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
427427
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
428428

429429
if self.config.final_sigmas_type == "sigma_min":
@@ -517,7 +517,7 @@ def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps:
517517
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
518518
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
519519

520-
sigmas = torch.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps).exp()
520+
sigmas = np.exp(np.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps))
521521
return sigmas
522522

523523
def _convert_to_beta(
@@ -540,7 +540,7 @@ def _convert_to_beta(
540540
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
541541
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
542542

543-
sigmas = torch.Tensor(
543+
sigmas = np.array(
544544
[
545545
sigma_min + (ppf * (sigma_max - sigma_min))
546546
for ppf in [

src/diffusers/schedulers/scheduling_heun_discrete.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -329,10 +329,10 @@ def set_timesteps(
329329
sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
330330
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
331331
elif self.config.use_exponential_sigmas:
332-
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
332+
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
333333
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
334334
elif self.config.use_beta_sigmas:
335-
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
335+
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
336336
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
337337

338338
sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
@@ -421,7 +421,7 @@ def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps:
421421
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
422422
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
423423

424-
sigmas = torch.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps).exp()
424+
sigmas = np.exp(np.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps))
425425
return sigmas
426426

427427
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_beta
@@ -445,7 +445,7 @@ def _convert_to_beta(
445445
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
446446
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
447447

448-
sigmas = torch.Tensor(
448+
sigmas = np.array(
449449
[
450450
sigma_min + (ppf * (sigma_max - sigma_min))
451451
for ppf in [

src/diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -289,10 +289,10 @@ def set_timesteps(
289289
sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
290290
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round()
291291
elif self.config.use_exponential_sigmas:
292-
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
292+
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
293293
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
294294
elif self.config.use_beta_sigmas:
295-
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
295+
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
296296
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
297297

298298
self.log_sigmas = torch.from_numpy(log_sigmas).to(device)
@@ -409,7 +409,7 @@ def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps:
409409
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
410410
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
411411

412-
sigmas = torch.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps).exp()
412+
sigmas = np.exp(np.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps))
413413
return sigmas
414414

415415
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_beta
@@ -433,7 +433,7 @@ def _convert_to_beta(
433433
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
434434
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
435435

436-
sigmas = torch.Tensor(
436+
sigmas = np.array(
437437
[
438438
sigma_min + (ppf * (sigma_max - sigma_min))
439439
for ppf in [

src/diffusers/schedulers/scheduling_k_dpm_2_discrete.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -288,10 +288,10 @@ def set_timesteps(
288288
sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
289289
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round()
290290
elif self.config.use_exponential_sigmas:
291-
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
291+
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
292292
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
293293
elif self.config.use_beta_sigmas:
294-
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
294+
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
295295
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
296296

297297
self.log_sigmas = torch.from_numpy(log_sigmas).to(device=device)
@@ -422,7 +422,7 @@ def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps:
422422
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
423423
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
424424

425-
sigmas = torch.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps).exp()
425+
sigmas = np.exp(np.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps))
426426
return sigmas
427427

428428
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_beta
@@ -446,7 +446,7 @@ def _convert_to_beta(
446446
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
447447
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
448448

449-
sigmas = torch.Tensor(
449+
sigmas = np.array(
450450
[
451451
sigma_min + (ppf * (sigma_max - sigma_min))
452452
for ppf in [

0 commit comments

Comments (0)