Skip to content

Commit 6a5c4f4

Browse files
Committed: "Fix beta and exponential sigmas + add tests"
1 parent: 3b28306 — commit: 6a5c4f4

24 files changed

+124
-52
lines changed

src/diffusers/schedulers/scheduling_deis_multistep.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -266,17 +266,17 @@ def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.devic
266266
)
267267

268268
sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
269+
log_sigmas = np.log(sigmas)
269270
if self.config.use_karras_sigmas:
270-
log_sigmas = np.log(sigmas)
271271
sigmas = np.flip(sigmas).copy()
272272
sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
273273
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round()
274274
sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32)
275275
elif self.config.use_exponential_sigmas:
276-
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
276+
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
277277
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
278278
elif self.config.use_beta_sigmas:
279-
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
279+
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
280280
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
281281
else:
282282
sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
@@ -408,7 +408,7 @@ def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps:
408408
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
409409
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
410410

411-
sigmas = torch.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps).exp()
411+
sigmas = np.exp(np.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps))
412412
return sigmas
413413

414414
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_beta
@@ -432,7 +432,7 @@ def _convert_to_beta(
432432
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
433433
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
434434

435-
sigmas = torch.Tensor(
435+
sigmas = np.array(
436436
[
437437
sigma_min + (ppf * (sigma_max - sigma_min))
438438
for ppf in [

src/diffusers/schedulers/scheduling_dpmsolver_multistep.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -400,10 +400,10 @@ def set_timesteps(
400400
sigmas = np.exp(lambdas)
401401
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round()
402402
elif self.config.use_exponential_sigmas:
403-
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
403+
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
404404
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
405405
elif self.config.use_beta_sigmas:
406-
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
406+
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
407407
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
408408
else:
409409
sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
@@ -556,7 +556,7 @@ def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps:
556556
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
557557
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
558558

559-
sigmas = torch.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps).exp()
559+
sigmas = np.exp(np.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps))
560560
return sigmas
561561

562562
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_beta
@@ -580,7 +580,7 @@ def _convert_to_beta(
580580
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
581581
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
582582

583-
sigmas = torch.Tensor(
583+
sigmas = np.array(
584584
[
585585
sigma_min + (ppf * (sigma_max - sigma_min))
586586
for ppf in [

src/diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -287,10 +287,10 @@ def set_timesteps(self, num_inference_steps: int = None, device: Union[str, torc
287287
timesteps = timesteps.copy().astype(np.int64)
288288
sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32)
289289
elif self.config.use_exponential_sigmas:
290-
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
290+
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
291291
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
292292
elif self.config.use_beta_sigmas:
293-
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
293+
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
294294
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
295295
else:
296296
sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
@@ -429,7 +429,7 @@ def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps:
429429
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
430430
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
431431

432-
sigmas = torch.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps).exp()
432+
sigmas = np.exp(np.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps))
433433
return sigmas
434434

435435
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_beta
@@ -453,7 +453,7 @@ def _convert_to_beta(
453453
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
454454
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
455455

456-
sigmas = torch.Tensor(
456+
sigmas = np.array(
457457
[
458458
sigma_min + (ppf * (sigma_max - sigma_min))
459459
for ppf in [

src/diffusers/schedulers/scheduling_dpmsolver_sde.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -380,10 +380,10 @@ def set_timesteps(
380380
sigmas = self._convert_to_karras(in_sigmas=sigmas)
381381
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
382382
elif self.config.use_exponential_sigmas:
383-
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
383+
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
384384
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
385385
elif self.config.use_beta_sigmas:
386-
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
386+
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
387387
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
388388

389389
second_order_timesteps = self._second_order_timesteps(sigmas, log_sigmas)
@@ -484,7 +484,7 @@ def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps:
484484
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
485485
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
486486

487-
sigmas = torch.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps).exp()
487+
sigmas = np.exp(np.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps))
488488
return sigmas
489489

490490
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_beta
@@ -508,7 +508,7 @@ def _convert_to_beta(
508508
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
509509
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
510510

511-
sigmas = torch.Tensor(
511+
sigmas = np.array(
512512
[
513513
sigma_min + (ppf * (sigma_max - sigma_min))
514514
for ppf in [

src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -339,16 +339,16 @@ def set_timesteps(
339339
)
340340

341341
sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
342+
log_sigmas = np.log(sigmas)
342343
if self.config.use_karras_sigmas:
343-
log_sigmas = np.log(sigmas)
344344
sigmas = np.flip(sigmas).copy()
345345
sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
346346
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round()
347347
elif self.config.use_exponential_sigmas:
348-
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
348+
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
349349
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
350350
elif self.config.use_beta_sigmas:
351-
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
351+
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
352352
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
353353
else:
354354
sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
@@ -498,7 +498,7 @@ def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps:
498498
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
499499
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
500500

501-
sigmas = torch.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps).exp()
501+
sigmas = np.exp(np.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps))
502502
return sigmas
503503

504504
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_beta
@@ -522,7 +522,7 @@ def _convert_to_beta(
522522
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
523523
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
524524

525-
sigmas = torch.Tensor(
525+
sigmas = np.array(
526526
[
527527
sigma_min + (ppf * (sigma_max - sigma_min))
528528
for ppf in [

src/diffusers/schedulers/scheduling_euler_discrete.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -419,11 +419,11 @@ def set_timesteps(
419419
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
420420

421421
elif self.config.use_exponential_sigmas:
422-
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
422+
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
423423
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
424424

425425
elif self.config.use_beta_sigmas:
426-
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
426+
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
427427
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
428428

429429
if self.config.final_sigmas_type == "sigma_min":
@@ -517,7 +517,7 @@ def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps:
517517
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
518518
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
519519

520-
sigmas = torch.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps).exp()
520+
sigmas = np.exp(np.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps))
521521
return sigmas
522522

523523
def _convert_to_beta(
@@ -540,7 +540,7 @@ def _convert_to_beta(
540540
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
541541
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
542542

543-
sigmas = torch.Tensor(
543+
sigmas = np.array(
544544
[
545545
sigma_min + (ppf * (sigma_max - sigma_min))
546546
for ppf in [

src/diffusers/schedulers/scheduling_heun_discrete.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -329,10 +329,10 @@ def set_timesteps(
329329
sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
330330
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
331331
elif self.config.use_exponential_sigmas:
332-
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
332+
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
333333
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
334334
elif self.config.use_beta_sigmas:
335-
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
335+
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
336336
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
337337

338338
sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
@@ -421,7 +421,7 @@ def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps:
421421
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
422422
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
423423

424-
sigmas = torch.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps).exp()
424+
sigmas = np.exp(np.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps))
425425
return sigmas
426426

427427
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_beta
@@ -445,7 +445,7 @@ def _convert_to_beta(
445445
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
446446
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
447447

448-
sigmas = torch.Tensor(
448+
sigmas = np.array(
449449
[
450450
sigma_min + (ppf * (sigma_max - sigma_min))
451451
for ppf in [

src/diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -289,10 +289,10 @@ def set_timesteps(
289289
sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
290290
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round()
291291
elif self.config.use_exponential_sigmas:
292-
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
292+
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
293293
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
294294
elif self.config.use_beta_sigmas:
295-
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
295+
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
296296
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
297297

298298
self.log_sigmas = torch.from_numpy(log_sigmas).to(device)
@@ -409,7 +409,7 @@ def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps:
409409
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
410410
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
411411

412-
sigmas = torch.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps).exp()
412+
sigmas = np.exp(np.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps))
413413
return sigmas
414414

415415
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_beta
@@ -433,7 +433,7 @@ def _convert_to_beta(
433433
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
434434
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
435435

436-
sigmas = torch.Tensor(
436+
sigmas = np.array(
437437
[
438438
sigma_min + (ppf * (sigma_max - sigma_min))
439439
for ppf in [

src/diffusers/schedulers/scheduling_k_dpm_2_discrete.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -288,10 +288,10 @@ def set_timesteps(
288288
sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
289289
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round()
290290
elif self.config.use_exponential_sigmas:
291-
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
291+
sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
292292
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
293293
elif self.config.use_beta_sigmas:
294-
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
294+
sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
295295
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
296296

297297
self.log_sigmas = torch.from_numpy(log_sigmas).to(device=device)
@@ -422,7 +422,7 @@ def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps:
422422
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
423423
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
424424

425-
sigmas = torch.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps).exp()
425+
sigmas = np.exp(np.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps))
426426
return sigmas
427427

428428
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_beta
@@ -446,7 +446,7 @@ def _convert_to_beta(
446446
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
447447
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
448448

449-
sigmas = torch.Tensor(
449+
sigmas = np.array(
450450
[
451451
sigma_min + (ppf * (sigma_max - sigma_min))
452452
for ppf in [

0 commit comments

Comments (0)