Commit b4f7dcf

server : use swa-full for draft context
ggml-ci
1 parent 093e3f1 · commit b4f7dcf

File tree: 1 file changed (+1, -3 lines)


tools/server/server.cpp

Lines changed: 1 addition & 3 deletions
@@ -1938,6 +1938,7 @@ struct server_context {
             params_dft.n_ctx        = params_base.speculative.n_ctx == 0 ? params_base.n_ctx / params_base.n_parallel : params_base.speculative.n_ctx;
             params_dft.n_gpu_layers = params_base.speculative.n_gpu_layers;
             params_dft.n_parallel   = 1;
+            params_dft.swa_full     = true; // TODO: this is not optimal and can be improved

             // force F16 KV cache for the draft model for extra performance
             params_dft.cache_type_k = GGML_TYPE_F16;
@@ -3203,9 +3204,7 @@ struct server_context {
                 }
             } else {
                 // if we don't cache the prompt, we have to remove the entire KV cache
-                llama_kv_self_seq_rm(ctx, slot.id, 0, -1);
                 slot.n_past = 0;
-                slot.cache_tokens.clear(); // TODO: not needed, will be cleared later via "keep_first()"
             }

             if (slot.n_past > 0 && slot.n_past < (int) slot.cache_tokens.size()) {
@@ -3220,7 +3219,6 @@ struct server_context {
                 SLT_WRN(slot, "n_past = %d, cache_tokens.size() = %d, seq_id = %d, pos_min = %d, n_swa = %d\n", slot.n_past, (int) slot.cache_tokens.size(), slot.id, pos_min, n_swa);
                 SLT_WRN(slot, "forcing full prompt re-processing due to lack of cache data (likely due to SWA, see %s)\n",
                         "https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055");
-                llama_kv_self_seq_rm(ctx, slot.id, 0, -1);
                 slot.n_past = 0;
             }
         }
