
Commit ec5ab1a

common : fix context shift help message (ggml-org#15448)
Signed-off-by: Jie Fu <[email protected]>
1 parent: 1a99c2d

File tree: 2 files changed (+2, -2 lines)

common/arg.cpp

Lines changed: 1 addition & 1 deletion
@@ -1532,7 +1532,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
     ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_PERPLEXITY}).set_env("LLAMA_ARG_NO_CONTEXT_SHIFT"));
     add_opt(common_arg(
         {"--context-shift"},
-        string_format("enables context shift on infinite text generation (default: %s)", params.ctx_shift ? "disabled" : "enabled"),
+        string_format("enables context shift on infinite text generation (default: %s)", params.ctx_shift ? "enabled" : "disabled"),
         [](common_params & params) {
            params.ctx_shift = true;
        }

common/common.h

Lines changed: 1 addition & 1 deletion
@@ -375,7 +375,7 @@ struct common_params {
     bool cont_batching = true; // insert new sequences for decoding on-the-fly
     bool flash_attn = false; // flash attention
     bool no_perf = false; // disable performance metrics
-    bool ctx_shift = false; // context shift on inifinite text generation
+    bool ctx_shift = false; // context shift on infinite text generation
     bool swa_full = false; // use full-size SWA cache (https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055)
     bool kv_unified = false; // enable unified KV cache
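
For context, a minimal standalone sketch (not part of the commit; common_params_sketch is a hypothetical stand-in for common_params) showing why the corrected ternary in common/arg.cpp now agrees with the default in common/common.h: ctx_shift defaults to false, so the help message should report "disabled".

// sketch.cpp -- assumed, simplified mirror of the lines changed above
#include <cstdio>

struct common_params_sketch {   // hypothetical stand-in for common_params
    bool ctx_shift = false;     // same default as common_params::ctx_shift in the diff above
};

int main() {
    common_params_sketch params;
    // Corrected ordering from the commit: true -> "enabled", false -> "disabled".
    std::printf("enables context shift on infinite text generation (default: %s)\n",
                params.ctx_shift ? "enabled" : "disabled");
    return 0;
}

With the default-constructed params, this prints "(default: disabled)", matching the actual behavior of the --context-shift flag.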
