
Commit a34cde9

server : add option to time limit the generation phase
ggml-ci
1 parent 95c76e8 commit a34cde9

2 files changed: 46 additions & 6 deletions

examples/server/README.md

Lines changed: 2 additions & 0 deletions
@@ -376,6 +376,8 @@ node index.js
 
 `min_keep`: If greater than 0, force samplers to return N possible tokens at minimum. Default: `0`
 
+`t_max_predict_ms`: Set a time limit in milliseconds for the prediction (a.k.a. text-generation) phase. The timeout will trigger if the generation takes more than the specified time (measured since the first token was generated) and if a new-line character has already been generated. Useful for FIM applications. Default: `0`, which is disabled.
+
 `image_data`: An array of objects to hold base64-encoded image `data` and its `id`s to be referenced in `prompt`. You can determine the place of the image in the prompt as in the following: `USER:[img-12]Describe the image in detail.\nASSISTANT:`. In this case, `[img-12]` will be replaced by the embeddings of the image with id `12` in the following `image_data` array: `{..., "image_data": [{"data": "<BASE64_STRING>", "id": 12}]}`. Use `image_data` only with multimodal models, e.g., LLaVA.
 
 `id_slot`: Assign the completion task to a specific slot. If it is -1, the task will be assigned to an idle slot. Default: `-1`
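For illustration only (not part of this commit), here is a minimal sketch of a request body that enables the new limit. It is built with nlohmann::json, which the server itself uses; the `prompt` and `n_predict` fields are the usual completion parameters documented elsewhere in this README, and the endpoint/transport details are left out.

```cpp
// Sketch only: a completion request body that stops generation roughly 500 ms
// after the first token, once at least one full line has been produced.
#include <cstdio>
#include <nlohmann/json.hpp>

int main() {
    const nlohmann::ordered_json body = {
        {"prompt",           "def fib(n):"},
        {"n_predict",        128},
        {"t_max_predict_ms", 500}   // 0 (the default) disables the time limit
    };
    printf("%s\n", body.dump(2).c_str()); // POST this JSON to the completion endpoint
    return 0;
}
```

In an editor/FIM setting this keeps latency bounded: the server returns as soon as a complete line is available and the time budget is spent, instead of generating until `n_predict` tokens are produced.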

examples/server/server.cpp

Lines changed: 44 additions & 6 deletions
@@ -128,9 +128,12 @@ struct slot_params {
     bool stream = true;
     bool cache_prompt = false; // remember the prompt to avoid reprocessing all prompt
 
-    int32_t n_keep = 0; // number of tokens to keep from initial prompt
-    int32_t n_discard = 0; // number of tokens after n_keep that may be discarded when shifting context, 0 defaults to half
-    int32_t n_predict = -1; // new tokens to predict
+    int32_t n_keep = 0; // number of tokens to keep from initial prompt
+    int32_t n_discard = 0; // number of tokens after n_keep that may be discarded when shifting context, 0 defaults to half
+    int32_t n_predict = -1; // new tokens to predict
+
+    int64_t t_max_prompt_ms = -1; // TODO: implement
+    int64_t t_max_predict_ms = -1; // if positive, limit the generation phase to this time limit
 
     std::vector<std::string> antiprompt;
 
@@ -175,6 +178,7 @@ struct server_slot {
     server_task_cmpl_type cmpl_type = SERVER_TASK_CMPL_TYPE_NORMAL;
 
     bool has_next_token = true;
+    bool has_new_line = false;
     bool truncated = false;
     bool stopped_eos = false;
     bool stopped_word = false;
@@ -216,6 +220,7 @@ struct server_slot {
 
         n_prompt_tokens = 0;
         generated_text = "";
+        has_new_line = false;
         truncated = false;
         stopped_eos = false;
         stopped_word = false;
@@ -898,6 +903,8 @@ struct server_context {
         slot.sparams.seed = json_value(data, "seed", default_sparams.seed);
         slot.sparams.n_probs = json_value(data, "n_probs", default_sparams.n_probs);
         slot.sparams.min_keep = json_value(data, "min_keep", default_sparams.min_keep);
+        //slot.params.t_max_prompt_ms = json_value(data, "t_max_prompt_ms", default_params.t_max_prompt_ms); // TODO: implement
+        slot.params.t_max_predict_ms = json_value(data, "t_max_predict_ms", default_params.t_max_predict_ms);
 
         // process "json_schema" and "grammar"
         if (data.contains("json_schema") && !data.at("json_schema").is_null() && data.contains("grammar") && !data.at("grammar").is_null()) {
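As a side note (not part of the diff), the `json_value(data, "t_max_predict_ms", default)` pattern above simply falls back to the slot's default when the request does not carry the field. Below is a self-contained sketch of an equivalent helper; the name `json_value_sketch` is hypothetical and the server's real helper may differ in signature and error handling.

```cpp
// Sketch of a json_value-style helper: take the field from the request JSON if
// present and non-null, otherwise keep the supplied default (e.g. -1 = disabled).
#include <cstdint>
#include <string>
#include <nlohmann/json.hpp>

using json = nlohmann::ordered_json;

template <typename T>
static T json_value_sketch(const json & data, const std::string & key, const T & default_value) {
    if (data.contains(key) && !data.at(key).is_null()) {
        return data.at(key).get<T>();
    }
    return default_value;
}

int main() {
    const json data = json::parse(R"({ "t_max_predict_ms": 500 })");

    const int64_t t_max_predict_ms = json_value_sketch<int64_t>(data, "t_max_predict_ms", -1);
    // t_max_predict_ms == 500 here; a request without the field keeps the default -1

    return t_max_predict_ms == 500 ? 0 : 1;
}
```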
@@ -1130,6 +1137,20 @@ struct server_context {
             SLT_DBG(slot, "stopped by limit, n_decoded = %d, n_predict = %d\n", slot.n_decoded, slot.params.n_predict);
         }
 
+        // if we have already seen a new line, we stop after a certain time limit
+        if (slot.has_new_line && slot.params.t_max_predict_ms > 0 &&
+                (ggml_time_us() - slot.t_start_generation > 1000.0f*slot.params.t_max_predict_ms)) {
+            slot.stopped_limit = true;
+            slot.has_next_token = false;
+
+            SLT_DBG(slot, "stopped by time limit, n_decoded = %d, t_max_predict_ms = %d ms\n", slot.n_decoded, (int) slot.params.t_max_predict_ms);
+        }
+
+        // check if there is a new line in the generated text
+        if (result.text_to_send.find('\n') != std::string::npos) {
+            slot.has_new_line = true;
+        }
+
         // if context shift is disabled, we stop when it reaches the context limit
         if (slot.n_decoded >= slot.n_ctx) {
             slot.truncated = true;
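Two details of the check above are easy to miss: `ggml_time_us()` returns microseconds while `t_max_predict_ms` is in milliseconds (hence the `1000.0f *` factor), and the limit only applies after at least one new-line has been generated, so a FIM completion always gets to finish its first line. Below is an illustrative, standalone equivalent (not code from the commit) using `std::chrono` and a hypothetical helper name.

```cpp
// Sketch of the stop condition: enforce t_max_predict_ms only once a '\n' has
// been produced, measuring elapsed time in microseconds from the first token.
#include <chrono>
#include <cstdint>
#include <cstdio>

static bool stop_by_time_limit(bool has_new_line, int64_t t_start_us, int64_t t_now_us, int64_t t_max_predict_ms) {
    if (!has_new_line || t_max_predict_ms <= 0) {
        return false; // limit disabled, or no complete line generated yet
    }
    // the limit is given in milliseconds, the timestamps are in microseconds
    return (t_now_us - t_start_us) > 1000 * t_max_predict_ms;
}

int main() {
    using namespace std::chrono;
    const int64_t t_start_us = duration_cast<microseconds>(steady_clock::now().time_since_epoch()).count();
    const int64_t t_now_us   = t_start_us + 750 * 1000; // pretend 750 ms have elapsed

    printf("%d\n", stop_by_time_limit(/*has_new_line=*/true, t_start_us, t_now_us, /*t_max_predict_ms=*/500)); // prints 1
    return 0;
}
```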
@@ -1278,6 +1299,7 @@ struct server_context {
             {"tokens_evaluated", slot.n_prompt_tokens},
             {"generation_settings", get_formated_generation(slot)},
             {"prompt", slot.prompt},
+            {"has_new_line", slot.has_new_line},
             {"truncated", slot.truncated},
             {"stopped_eos", slot.stopped_eos},
             {"stopped_word", slot.stopped_word},
@@ -1604,6 +1626,7 @@ struct server_context {
             slot_data["prompt"] = slot.prompt;
             slot_data["next_token"] = {
                 {"has_next_token", slot.has_next_token},
+                {"has_new_line", slot.has_new_line},
                 {"n_remain", slot.n_remaining},
                 {"n_decoded", slot.n_decoded},
                 {"stopped_eos", slot.stopped_eos},
@@ -1946,6 +1969,13 @@ struct server_context {
             auto prefix_tokens = tokenize(slot.params.input_prefix, false, false);
             auto suffix_tokens = tokenize(slot.params.input_suffix, false, false);
 
+            // for now pick context to fit in a single batch (ratio prefix:suffix = 3:1, TODO: configurable?)
+            const int n_suffix_take = std::min<int>(suffix_tokens.size(), n_batch/4);
+            const int n_prefix_take = std::min<int>(prefix_tokens.size(), (n_batch - 3) - n_suffix_take);
+
+            prefix_tokens.erase(prefix_tokens.begin(), prefix_tokens.begin() + prefix_tokens.size() - n_prefix_take);
+            suffix_tokens.resize(n_suffix_take);
+
             prefix_tokens.insert(prefix_tokens.begin(), llama_token_fim_pre(model));
             suffix_tokens.insert(suffix_tokens.begin(), llama_token_fim_suf(model));
 
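A worked example (illustrative, not from the commit) of the budgeting above, assuming `n_batch = 2048`: the suffix gets at most `n_batch/4 = 512` tokens, the prefix at most `(n_batch - 3) - 512 = 1533`, and the prefix is trimmed from the front so the tokens nearest the cursor are kept, while the suffix keeps its beginning and is truncated at its end. The `- 3` plausibly reserves room for the FIM special tokens, but the diff does not state that.

```cpp
// Worked example of the FIM prefix/suffix budgeting with n_batch = 2048.
#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
    const int n_batch = 2048;

    std::vector<int> prefix_tokens(3000, 1); // stand-in for a tokenized prefix (too long to fit)
    std::vector<int> suffix_tokens( 700, 2); // stand-in for a tokenized suffix

    const int n_suffix_take = std::min<int>(suffix_tokens.size(), n_batch/4);                     // 512
    const int n_prefix_take = std::min<int>(prefix_tokens.size(), (n_batch - 3) - n_suffix_take); // 1533

    // keep the last n_prefix_take prefix tokens and the first n_suffix_take suffix tokens
    prefix_tokens.erase(prefix_tokens.begin(), prefix_tokens.begin() + prefix_tokens.size() - n_prefix_take);
    suffix_tokens.resize(n_suffix_take);

    printf("prefix: %zu, suffix: %zu\n", prefix_tokens.size(), suffix_tokens.size()); // prefix: 1533, suffix: 512
    return 0;
}
```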
@@ -1968,9 +1998,17 @@ struct server_context {
 
             SLT_INF(slot, "prompt tokenized, n_ctx_slot = %d, n_keep = %d, n_prompt_tokens = %d\n", slot.n_ctx, slot.params.n_keep, slot.n_prompt_tokens);
 
-            // print prompt tokens:
-            for (int i = 0; i < (int) prompt_tokens.size(); i++) {
-                SLT_DBG(slot, "prompt token %3d: %6d '%s'\n", i, prompt_tokens[i], common_token_to_piece(ctx, prompt_tokens[i]).c_str());
+            // print prompt tokens (for debugging)
+            if (1) {
+                // first 16 tokens (avoid flooding logs)
+                for (int i = 0; i < std::min<int>(16, prompt_tokens.size()); i++) {
+                    SLT_DBG(slot, "prompt token %3d: %6d '%s'\n", i, prompt_tokens[i], common_token_to_piece(ctx, prompt_tokens[i]).c_str());
+                }
+            } else {
+                // all
+                for (int i = 0; i < (int) prompt_tokens.size(); i++) {
+                    SLT_DBG(slot, "prompt token %3d: %6d '%s'\n", i, prompt_tokens[i], common_token_to_piece(ctx, prompt_tokens[i]).c_str());
+                }
             }
 
             // empty prompt passed -> release the slot and send empty response
