From b4f7dcf3ce4e438a97e8a071bd1311e2d603dda1 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Mon, 2 Jun 2025 15:49:59 +0300
Subject: [PATCH 1/2] server : use swa-full for draft context

ggml-ci
---
 tools/server/server.cpp | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/tools/server/server.cpp b/tools/server/server.cpp
index 4b92eeac9499b..362c95881b5b8 100644
--- a/tools/server/server.cpp
+++ b/tools/server/server.cpp
@@ -1938,6 +1938,7 @@ struct server_context {
             params_dft.n_ctx        = params_base.speculative.n_ctx == 0 ? params_base.n_ctx / params_base.n_parallel : params_base.speculative.n_ctx;
             params_dft.n_gpu_layers = params_base.speculative.n_gpu_layers;
             params_dft.n_parallel   = 1;
+            params_dft.swa_full     = true; // TODO: this is not optimal and can be improved

             // force F16 KV cache for the draft model for extra performance
             params_dft.cache_type_k = GGML_TYPE_F16;
@@ -3203,9 +3204,7 @@ struct server_context {
                     }
                 } else {
                     // if we don't cache the prompt, we have to remove the entire KV cache
-                    llama_kv_self_seq_rm(ctx, slot.id, 0, -1);
                     slot.n_past = 0;
-                    slot.cache_tokens.clear(); // TODO: not needed, will be cleared later via "keep_first()"
                 }

                 if (slot.n_past > 0 && slot.n_past < (int) slot.cache_tokens.size()) {
@@ -3220,7 +3219,6 @@ struct server_context {
                         SLT_WRN(slot, "n_past = %d, cache_tokens.size() = %d, seq_id = %d, pos_min = %d, n_swa = %d\n", slot.n_past, (int) slot.cache_tokens.size(), slot.id, pos_min, n_swa);
                         SLT_WRN(slot, "forcing full prompt re-processing due to lack of cache data (likely due to SWA, see %s)\n", "https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055");

-                        llama_kv_self_seq_rm(ctx, slot.id, 0, -1);
                         slot.n_past = 0;
                     }
                 }

From 272df3f94b42d53a9b779270cdc0b9d5c16ff56a Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Mon, 2 Jun 2025 21:05:38 +0300
Subject: [PATCH 2/2] server : disable speculative decoding for SWA models

---
 tools/server/server.cpp | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/tools/server/server.cpp b/tools/server/server.cpp
index 362c95881b5b8..512f319165eae 100644
--- a/tools/server/server.cpp
+++ b/tools/server/server.cpp
@@ -1938,7 +1938,6 @@ struct server_context {
             params_dft.n_ctx        = params_base.speculative.n_ctx == 0 ? params_base.n_ctx / params_base.n_parallel : params_base.speculative.n_ctx;
             params_dft.n_gpu_layers = params_base.speculative.n_gpu_layers;
             params_dft.n_parallel   = 1;
-            params_dft.swa_full     = true; // TODO: this is not optimal and can be improved

             // force F16 KV cache for the draft model for extra performance
             params_dft.cache_type_k = GGML_TYPE_F16;
@@ -2017,6 +2016,11 @@ struct server_context {
                 params_base.n_cache_reuse = 0;
                 SRV_WRN("%s\n", "cache_reuse is not supported by this context, it will be disabled");
             }
+
+            if (!params_base.speculative.model.path.empty()) {
+                SRV_ERR("%s\n", "err: speculative decode is not supported by this context");
+                return false;
+            }
         }

         return true;
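
Note (not part of the patch series above): below is a minimal, standalone C++ sketch of the idea behind the guard added in PATCH 2/2, i.e. refusing a configured draft model up front when the active context cannot support speculative decoding, instead of failing later with missing KV-cache data. All type, field, and function names here (server_params, speculative_params, validate_params, swa_cache, model_path, "draft.gguf") are hypothetical stand-ins for illustration only; in the real patch the check lives inside server_context and reads params_base.speculative.model.path.

    // Sketch: reject a draft model when the context uses a sliding-window
    // attention (SWA) KV cache. In the actual patch the .empty() check is
    // placed inside a block that only runs for such unsupported contexts,
    // so the SWA condition is implicit there; it is made explicit here.
    #include <cstdio>
    #include <string>

    struct speculative_params {
        std::string model_path; // path to the draft model, empty if unset
    };

    struct server_params {
        bool swa_cache = false;          // true when the model uses a sliding-window KV cache
        speculative_params speculative;  // draft-model settings
    };

    // Returns false when the configuration cannot be supported, mirroring the
    // "return false" that the second patch adds to the server setup path.
    static bool validate_params(const server_params & params) {
        if (params.swa_cache && !params.speculative.model_path.empty()) {
            fprintf(stderr, "err: speculative decoding is not supported by this context\n");
            return false;
        }
        return true;
    }

    int main() {
        server_params params;
        params.swa_cache             = true;
        params.speculative.model_path = "draft.gguf";

        // Expected to print the error and exit non-zero, i.e. the server
        // refuses to start rather than running with a broken draft cache.
        return validate_params(params) ? 0 : 1;
    }

Failing at startup, as the patch's added "return false" does, surfaces the unsupported SWA plus draft-model combination immediately rather than letting generation proceed with an inconsistent draft KV cache.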