diff --git a/denoiser.hpp b/denoiser.hpp
index e64b292d1..3c53301bc 100644
--- a/denoiser.hpp
+++ b/denoiser.hpp
@@ -770,7 +770,6 @@ static void sample_k_diffusion(sample_method_t method,
         } break;
         case DPMPP2S_A: {
             struct ggml_tensor* noise = ggml_dup_tensor(work_ctx, x);
-            struct ggml_tensor* d     = ggml_dup_tensor(work_ctx, x);
             struct ggml_tensor* x2    = ggml_dup_tensor(work_ctx, x);
 
             for (int i = 0; i < steps; i++) {
@@ -785,22 +784,15 @@ static void sample_k_diffusion(sample_method_t method,
                 auto sigma_fn = [](float t) -> float { return exp(-t); };
 
                 if (sigma_down == 0) {
-                    // Euler step
-                    float* vec_d        = (float*)d->data;
+                    // d = (x - denoised) / sigmas[i];
+                    // dt = sigma_down - sigmas[i];
+                    // x += d * dt;
+                    // => x = denoised
                     float* vec_x        = (float*)x->data;
                     float* vec_denoised = (float*)denoised->data;
 
-                    for (int j = 0; j < ggml_nelements(d); j++) {
-                        vec_d[j] = (vec_x[j] - vec_denoised[j]) / sigmas[i];
-                    }
-
-                    // TODO: If sigma_down == 0, isn't this wrong?
-                    // But
-                    // https://github.com/crowsonkb/k-diffusion/blob/master/k_diffusion/sampling.py#L525
-                    // has this exactly the same way.
-                    float dt = sigma_down - sigmas[i];
-                    for (int j = 0; j < ggml_nelements(d); j++) {
-                        vec_x[j] = vec_x[j] + vec_d[j] * dt;
+                    for (int j = 0; j < ggml_nelements(x); j++) {
+                        vec_x[j] = vec_denoised[j];
                     }
                 } else {
                     // DPM-Solver++(2S)
@@ -809,7 +801,6 @@ static void sample_k_diffusion(sample_method_t method,
                     float h = t_next - t;
                     float s = t + 0.5f * h;
 
-                    float* vec_d        = (float*)d->data;
                     float* vec_x        = (float*)x->data;
                     float* vec_x2       = (float*)x2->data;
                     float* vec_denoised = (float*)denoised->data;
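For reference, a minimal standalone sketch (not part of the patch) of the algebra stated in the new comment: when `sigma_down == 0`, the removed Euler ancestral step `x += d * dt` with `d = (x - denoised) / sigmas[i]` and `dt = sigma_down - sigmas[i] = -sigmas[i]` collapses to `x = denoised`, which is exactly what the simplified loop now writes. The numeric values below are arbitrary placeholders for illustration.

```cpp
// Standalone check of the simplification above; sigma and latent values
// are arbitrary sample inputs, not values taken from the sampler.
#include <cassert>
#include <cmath>
#include <cstdio>

int main() {
    const float sigma_i    = 0.5f;   // sigmas[i] > 0
    const float sigma_down = 0.0f;   // last ancestral step
    const float denoised   = 0.75f;  // model output for this element
    float x                = 1.25f;  // current latent value

    // Old path: explicit Euler ancestral step.
    const float d  = (x - denoised) / sigma_i;
    const float dt = sigma_down - sigma_i;  // == -sigma_i
    x += d * dt;                            // x - (x - denoised) == denoised

    // New path copies denoised into x directly; both agree up to rounding.
    assert(std::fabs(x - denoised) < 1e-6f);
    printf("euler step result: %f, denoised: %f\n", x, denoised);
    return 0;
}
```

Since the `d` buffer is no longer referenced in either branch after this change, the `ggml_dup_tensor(work_ctx, x)` allocation for it can be dropped as well, as the first hunk does.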