Skip to content

Commit 6e13d3f

Browse files
committed
Merge remote-tracking branch 'upstream/master' into fix-stop-trim
2 parents 40a68f4 + 755a9b2 commit 6e13d3f

File tree

22 files changed

+743
-88
lines changed

22 files changed

+743
-88
lines changed

README.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -130,6 +130,7 @@ Typically finetunes of the base models below are supported as well.
130130
- Flutter/Dart: [netdur/llama_cpp_dart](https://github.com/netdur/llama_cpp_dart)
131131
- PHP (API bindings and features built on top of llama.cpp): [distantmagic/resonance](https://github.com/distantmagic/resonance) [(more info)](https://github.com/ggerganov/llama.cpp/pull/6326)
132132
- Guile Scheme: [guile_llama_cpp](https://savannah.nongnu.org/projects/guile-llama-cpp)
133+
- Swift [srgtuszy/llama-cpp-swift](https://github.com/srgtuszy/llama-cpp-swift)
133134

134135
**UI:**
135136

common/arg.cpp

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -947,6 +947,20 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
947947
params.sparams.tfs_z = std::stof(value);
948948
}
949949
).set_sparam());
950+
add_opt(common_arg(
951+
{"--xtc-probability"}, "N",
952+
string_format("xtc probability (default: %.1f, 0.0 = disabled)", (double)params.sparams.xtc_probability),
953+
[](common_params & params, const std::string & value) {
954+
params.sparams.xtc_probability = std::stof(value);
955+
}
956+
).set_sparam());
957+
add_opt(common_arg(
958+
{"--xtc-threshold"}, "N",
959+
string_format("xtc threshold (default: %.1f, 1.0 = disabled)", (double)params.sparams.xtc_threshold),
960+
[](common_params & params, const std::string & value) {
961+
params.sparams.xtc_threshold = std::stof(value);
962+
}
963+
).set_sparam());
950964
add_opt(common_arg(
951965
{"--typical"}, "N",
952966
string_format("locally typical sampling, parameter p (default: %.1f, 1.0 = disabled)", (double)params.sparams.typ_p),
@@ -1788,6 +1802,13 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
17881802
params.n_threads_http = value;
17891803
}
17901804
).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_THREADS_HTTP"));
1805+
add_opt(common_arg(
1806+
{"--cache-reuse"}, "N",
1807+
string_format("min chunk size to attempt reusing from the cache via KV shifting (default: %d)", params.n_cache_reuse),
1808+
[](common_params & params, int value) {
1809+
params.n_cache_reuse = value;
1810+
}
1811+
).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CACHE_REUSE"));
17911812
add_opt(common_arg(
17921813
{"--metrics"},
17931814
string_format("enable prometheus compatible metrics endpoint (default: %s)", params.endpoint_metrics ? "enabled" : "disabled"),

common/common.cpp

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2104,6 +2104,8 @@ void yaml_dump_non_result_info(FILE * stream, const common_params & params, cons
21042104
fprintf(stream, "top_k: %d # default: 40\n", sparams.top_k);
21052105
fprintf(stream, "top_p: %f # default: 0.95\n", sparams.top_p);
21062106
fprintf(stream, "min_p: %f # default: 0.0\n", sparams.min_p);
2107+
fprintf(stream, "xtc_probability: %f # default: 0.0\n", sparams.xtc_probability);
2108+
fprintf(stream, "xtc_threshold: %f # default: 0.1\n", sparams.xtc_threshold);
21072109
fprintf(stream, "typ_p: %f # default: 1.0\n", sparams.typ_p);
21082110
fprintf(stream, "verbose_prompt: %s # default: false\n", params.verbose_prompt ? "true" : "false");
21092111
fprintf(stream, "display_prompt: %s # default: true\n", params.display_prompt ? "true" : "false");

common/common.h

Lines changed: 9 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -90,6 +90,8 @@ enum common_sampler_type {
9090
COMMON_SAMPLER_TYPE_TFS_Z = 4,
9191
COMMON_SAMPLER_TYPE_TYPICAL_P = 5,
9292
COMMON_SAMPLER_TYPE_TEMPERATURE = 6,
93+
COMMON_SAMPLER_TYPE_XTC = 7,
94+
COMMON_SAMPLER_TYPE_INFILL = 8,
9395
};
9496

9597
// dimensionality reduction methods, used by cvector-generator
@@ -108,6 +110,8 @@ struct common_sampler_params {
108110
int32_t top_k = 40; // <= 0 to use vocab size
109111
float top_p = 0.95f; // 1.0 = disabled
110112
float min_p = 0.05f; // 0.0 = disabled
113+
float xtc_probability = 0.00f; // 0.0 = disabled
114+
float xtc_threshold = 0.10f; // > 0.5 disables XTC
111115
float tfs_z = 1.00f; // 1.0 = disabled
112116
float typ_p = 1.00f; // typical_p, 1.0 = disabled
113117
float temp = 0.80f; // <= 0.0 to sample greedily, 0.0 to not output probabilities
@@ -124,13 +128,15 @@ struct common_sampler_params {
124128
bool ignore_eos = false;
125129
bool no_perf = false; // disable performance metrics
126130

131+
127132
std::vector<enum common_sampler_type> samplers = {
128133
COMMON_SAMPLER_TYPE_TOP_K,
129134
COMMON_SAMPLER_TYPE_TFS_Z,
130135
COMMON_SAMPLER_TYPE_TYPICAL_P,
131136
COMMON_SAMPLER_TYPE_TOP_P,
132137
COMMON_SAMPLER_TYPE_MIN_P,
133-
COMMON_SAMPLER_TYPE_TEMPERATURE
138+
COMMON_SAMPLER_TYPE_XTC,
139+
COMMON_SAMPLER_TYPE_TEMPERATURE,
134140
};
135141

136142
std::string grammar; // optional BNF-like grammar to constrain sampling
@@ -277,7 +283,8 @@ struct common_params {
277283
int32_t port = 8080; // server listens on this network port
278284
int32_t timeout_read = 600; // http read timeout in seconds
279285
int32_t timeout_write = timeout_read; // http write timeout in seconds
280-
int n_threads_http = -1; // number of threads to process HTTP requests (TODO: support threadpool)
286+
int32_t n_threads_http = -1; // number of threads to process HTTP requests (TODO: support threadpool)
287+
int32_t n_cache_reuse = 0; // min chunk size to reuse from the cache via KV shifting
281288

282289
std::string hostname = "127.0.0.1";
283290
std::string public_path = ""; // NOLINT

common/sampling.cpp

Lines changed: 17 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -130,10 +130,10 @@ std::string common_sampler_params::print() const {
130130

131131
snprintf(result, sizeof(result),
132132
"\trepeat_last_n = %d, repeat_penalty = %.3f, frequency_penalty = %.3f, presence_penalty = %.3f\n"
133-
"\ttop_k = %d, tfs_z = %.3f, top_p = %.3f, min_p = %.3f, typical_p = %.3f, temp = %.3f\n"
133+
"\ttop_k = %d, tfs_z = %.3f, top_p = %.3f, min_p = %.3f, xtc_probability = %.3f, xtc_threshold = %.3f, typical_p = %.3f, temp = %.3f\n"
134134
"\tmirostat = %d, mirostat_lr = %.3f, mirostat_ent = %.3f",
135135
penalty_last_n, penalty_repeat, penalty_freq, penalty_present,
136-
top_k, tfs_z, top_p, min_p, typ_p, temp,
136+
top_k, tfs_z, top_p, min_p, xtc_probability, xtc_threshold, typ_p, temp,
137137
mirostat, mirostat_eta, mirostat_tau);
138138

139139
return std::string(result);
@@ -184,6 +184,9 @@ struct common_sampler * common_sampler_init(const struct llama_model * model, co
184184
case COMMON_SAMPLER_TYPE_MIN_P:
185185
llama_sampler_chain_add(result->chain, llama_sampler_init_min_p (params.min_p, params.min_keep));
186186
break;
187+
case COMMON_SAMPLER_TYPE_XTC:
188+
llama_sampler_chain_add(result->chain, llama_sampler_init_xtc (params.xtc_probability, params.xtc_threshold, params.min_keep, params.seed));
189+
break;
187190
case COMMON_SAMPLER_TYPE_TFS_Z:
188191
llama_sampler_chain_add(result->chain, llama_sampler_init_tail_free(params.tfs_z, params.min_keep));
189192
break;
@@ -193,6 +196,9 @@ struct common_sampler * common_sampler_init(const struct llama_model * model, co
193196
case COMMON_SAMPLER_TYPE_TEMPERATURE:
194197
llama_sampler_chain_add(result->chain, llama_sampler_init_temp_ext (params.temp, params.dynatemp_range, params.dynatemp_exponent));
195198
break;
199+
case COMMON_SAMPLER_TYPE_INFILL:
200+
llama_sampler_chain_add(result->chain, llama_sampler_init_infill (model));
201+
break;
196202
default:
197203
GGML_ASSERT(false && "unknown sampler type");
198204
}
@@ -372,6 +378,8 @@ char common_sampler_type_to_chr(enum common_sampler_type cnstr) {
372378
case COMMON_SAMPLER_TYPE_TOP_P: return 'p';
373379
case COMMON_SAMPLER_TYPE_MIN_P: return 'm';
374380
case COMMON_SAMPLER_TYPE_TEMPERATURE: return 't';
381+
case COMMON_SAMPLER_TYPE_XTC: return 'x';
382+
case COMMON_SAMPLER_TYPE_INFILL: return 'i';
375383
default : return '?';
376384
}
377385
}
@@ -384,6 +392,8 @@ std::string common_sampler_type_to_str(enum common_sampler_type cnstr) {
384392
case COMMON_SAMPLER_TYPE_TOP_P: return "top_p";
385393
case COMMON_SAMPLER_TYPE_MIN_P: return "min_p";
386394
case COMMON_SAMPLER_TYPE_TEMPERATURE: return "temperature";
395+
case COMMON_SAMPLER_TYPE_XTC: return "xtc";
396+
case COMMON_SAMPLER_TYPE_INFILL: return "infill";
387397
default : return "";
388398
}
389399
}
@@ -396,6 +406,8 @@ std::vector<common_sampler_type> common_sampler_types_from_names(const std::vect
396406
{ "min_p", COMMON_SAMPLER_TYPE_MIN_P },
397407
{ "tfs_z", COMMON_SAMPLER_TYPE_TFS_Z },
398408
{ "temperature", COMMON_SAMPLER_TYPE_TEMPERATURE },
409+
{ "xtc", COMMON_SAMPLER_TYPE_XTC },
410+
{ "infill", COMMON_SAMPLER_TYPE_INFILL },
399411
};
400412

401413
// since samplers names are written multiple ways
@@ -441,7 +453,9 @@ std::vector<common_sampler_type> common_sampler_types_from_chars(const std::stri
441453
{ common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TYPICAL_P), COMMON_SAMPLER_TYPE_TYPICAL_P },
442454
{ common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TOP_P), COMMON_SAMPLER_TYPE_TOP_P },
443455
{ common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_MIN_P), COMMON_SAMPLER_TYPE_MIN_P },
444-
{ common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TEMPERATURE), COMMON_SAMPLER_TYPE_TEMPERATURE }
456+
{ common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TEMPERATURE), COMMON_SAMPLER_TYPE_TEMPERATURE },
457+
{ common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_XTC), COMMON_SAMPLER_TYPE_XTC },
458+
{ common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_INFILL), COMMON_SAMPLER_TYPE_INFILL },
445459
};
446460

447461
std::vector<common_sampler_type> samplers;

examples/main/README.md

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -241,6 +241,19 @@ The `--mirostat-ent` option sets the Mirostat target entropy (tau), which repres
241241

242242
Example usage: `--mirostat 2 --mirostat-lr 0.05 --mirostat-ent 3.0`
243243

244+
### XTC Sampling
245+
246+
- `--xtc-probability N`: Sets the chance for token removal (checked once on sampler start) (default: 0.0).
247+
- `--xtc-threshold N`: Sets a minimum probability threshold for tokens to be removed (default: 0.1).
248+
249+
Exclude Top Choices (XTC) is a unique sampler that is designed to remove top tokens from consideration and avoid more obvious and repetitive outputs. With a chance of `xtc-probability` it searches for tokens with probabilities of `xtc-threshold` and above, then removes all such tokens except the least probable one.
250+
251+
By removing top tokens XTC can improve the variety of answers, break writing clichés and inhibit repetition, since clichés and repeated phrases are usually more likely to appear. By keeping the last token above the threshold, XTC ensures that the answer is still coherent. XTC is meant to be used for creative tasks, but feel free to experiment with different settings for different models.
252+
253+
Being experimental and unique, XTC is disabled by default. The recommended combination of samplers is Min-P followed by XTC on its default settings: `--sampling-seq mx --min-p 0.02 --xtc-probability 0.5`.
254+
255+
Example usage: `--xtc-probability 0.5 --xtc-threshold 0.1`
256+
244257
### Logit Bias
245258

246259
- `-l TOKEN_ID(+/-)BIAS, --logit-bias TOKEN_ID(+/-)BIAS`: Modify the likelihood of a token appearing in the generated text completion.

examples/main/main.cpp

Lines changed: 17 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -569,30 +569,30 @@ int main(int argc, char ** argv) {
569569
if (!params.ctx_shift){
570570
LOG_DBG("\n\n%s: context full and context shift is disabled => stopping\n", __func__);
571571
break;
572-
} else {
573-
if (params.n_predict == -2) {
574-
LOG_DBG("\n\n%s: context full and n_predict == -%d => stopping\n", __func__, params.n_predict);
575-
break;
576-
}
572+
}
573+
574+
if (params.n_predict == -2) {
575+
LOG_DBG("\n\n%s: context full and n_predict == -%d => stopping\n", __func__, params.n_predict);
576+
break;
577+
}
577578

578-
const int n_left = n_past - params.n_keep;
579-
const int n_discard = n_left/2;
579+
const int n_left = n_past - params.n_keep;
580+
const int n_discard = n_left/2;
580581

581-
LOG_DBG("context full, swapping: n_past = %d, n_left = %d, n_ctx = %d, n_keep = %d, n_discard = %d\n",
582-
n_past, n_left, n_ctx, params.n_keep, n_discard);
582+
LOG_DBG("context full, swapping: n_past = %d, n_left = %d, n_ctx = %d, n_keep = %d, n_discard = %d\n",
583+
n_past, n_left, n_ctx, params.n_keep, n_discard);
583584

584-
llama_kv_cache_seq_rm (ctx, 0, params.n_keep , params.n_keep + n_discard);
585-
llama_kv_cache_seq_add(ctx, 0, params.n_keep + n_discard, n_past, -n_discard);
585+
llama_kv_cache_seq_rm (ctx, 0, params.n_keep , params.n_keep + n_discard);
586+
llama_kv_cache_seq_add(ctx, 0, params.n_keep + n_discard, n_past, -n_discard);
586587

587-
n_past -= n_discard;
588+
n_past -= n_discard;
588589

589-
LOG_DBG("after swap: n_past = %d\n", n_past);
590+
LOG_DBG("after swap: n_past = %d\n", n_past);
590591

591-
LOG_DBG("embd: %s\n", string_from(ctx, embd).c_str());
592+
LOG_DBG("embd: %s\n", string_from(ctx, embd).c_str());
592593

593-
LOG_DBG("clear session path\n");
594-
path_session.clear();
595-
}
594+
LOG_DBG("clear session path\n");
595+
path_session.clear();
596596
}
597597
} else {
598598
// context extension via Self-Extend

examples/server/README.md

Lines changed: 25 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -147,6 +147,7 @@ The project is under active development, and we are [looking for feedback and co
147147
| `--ssl-cert-file FNAME` | path to file a PEM-encoded SSL certificate<br/>(env: LLAMA_ARG_SSL_CERT_FILE) |
148148
| `-to, --timeout N` | server read/write timeout in seconds (default: 600)<br/>(env: LLAMA_ARG_TIMEOUT) |
149149
| `--threads-http N` | number of threads used to process HTTP requests (default: -1)<br/>(env: LLAMA_ARG_THREADS_HTTP) |
150+
| `--cache-reuse N` | min chunk size to attempt reusing from the cache via KV shifting (default: 0)<br/>(env: LLAMA_ARG_CACHE_REUSE) |
150151
| `--metrics` | enable prometheus compatible metrics endpoint (default: disabled)<br/>(env: LLAMA_ARG_ENDPOINT_METRICS) |
151152
| `--slots` | enable slots monitoring endpoint (default: disabled)<br/>(env: LLAMA_ARG_ENDPOINT_SLOTS) |
152153
| `--props` | enable changing global properties via POST /props (default: disabled)<br/>(env: LLAMA_ARG_ENDPOINT_PROPS) |
@@ -523,8 +524,31 @@ Takes a prefix and a suffix and returns the predicted completion as stream.
523524

524525
- `input_prefix`: Set the prefix of the code to infill.
525526
- `input_suffix`: Set the suffix of the code to infill.
527+
- `input_extra`: Additional context inserted before the FIM prefix.
528+
- `prompt`: Added after the `FIM_MID` token.
526529

527-
It also accepts all the options of `/completion`.
530+
`input_extra` is an array of `{"filename": string, "text": string}` objects.
531+
532+
The endpoint also accepts all the options of `/completion`.
533+
534+
If the model has `FIM_REPO` and `FIM_FILE_SEP` tokens, the [repo-level pattern](https://arxiv.org/pdf/2409.12186) is used:
535+
536+
```txt
537+
<FIM_REP>myproject
538+
<FIM_SEP>{chunk 0 filename}
539+
{chunk 0 text}
540+
<FIM_SEP>{chunk 1 filename}
541+
{chunk 1 text}
542+
...
543+
<FIM_SEP>filename
544+
<FIM_PRE>[input_prefix]<FIM_SUF>[input_suffix]<FIM_MID>[prompt]
545+
```
546+
547+
If the tokens are missing, then the extra context is simply prefixed at the start:
548+
549+
```txt
550+
[input_extra]<FIM_PRE>[input_prefix]<FIM_SUF>[input_suffix]<FIM_MID>[prompt]
551+
```
528552

529553
### **GET** `/props`: Get server global properties.
530554

examples/server/public/index-new.html

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -43,6 +43,8 @@
4343
top_k: 0, // <= 0 to use vocab size
4444
top_p: 1.0, // 1.0 = disabled
4545
min_p: 0.05, // 0 = disabled; recommended for non-english: ~ 0.4
46+
xtc_probability: 0.0, // 0 = disabled;
47+
xtc_threshold: 0.1, // > 0.5 disables XTC;
4648
tfs_z: 1.0, // 1.0 = disabled
4749
typical_p: 1.0, // 1.0 = disabled
4850
presence_penalty: 0.0, // 0.0 = disabled
@@ -836,6 +838,8 @@
836838
${FloatField({ label: "TFS-Z", title: "Activates tail-free sampling, a method used to limit the prediction of tokens that are too frequent. The parameter z controls the strength of this limitation. A value of 1.0 means that this function is deactivated.", max: 1.0, min: 0.0, name: "tfs_z", step: 0.01, value: params.value.tfs_z })}
837839
${FloatField({ label: "Frequency Penalty", title: "A penalty that is applied based on the frequency with which certain tokens occur in the training data set. A higher value results in rare tokens being favoured.", max: 1.0, min: 0.0, name: "frequency_penalty", step: 0.01, value: params.value.frequency_penalty })}
838840
${FloatField({ label: "Typical-P", title: "Activates local typical sampling, a method used to limit the prediction of tokens that are atypical in the current context. The parameter p controls the strength of this limitation. A value of 1.0 means that this function is deactivated.", max: 1.0, min: 0.0, name: "typical_p", step: 0.01, value: params.value.typical_p })}
841+
${FloatField({ label: "XTC probability", title: "Sets the chance for token removal (checked once on sampler start)", max: 1.0, min: 0.0, name: "xtc_probability", step: 0.01, value: params.value.xtc_probability })}
842+
${FloatField({ label: "XTC threshold", title: "Sets a minimum probability threshold for tokens to be removed", max: 0.5, min: 0.0, name: "xtc_threshold", step: 0.01, value: params.value.xtc_threshold })}
839843
${IntField({ label: "Min Keep", title: "If greater than 0, samplers are forced to return N possible tokens at minimum. Default is 0", max: 10, min: 0, name: "min_keep", value: params.value.min_keep })}
840844
</fieldset>
841845
@@ -1132,6 +1136,8 @@ <h2>llama.cpp</h2>
11321136
const snapSettings = {
11331137
temperature: { snapValue: 1.0, snapRangeMultiplier: 6 },
11341138
min_p: { snapValue: 0.05, snapRangeMultiplier: 2 },
1139+
xtc_probability: { snapValue: 0.0, snapRangeMultiplier: 4 },
1140+
xtc_threshold: { snapValue: 0.5, snapRangeMultiplier: 4 },
11351141
top_p: { snapValue: 1.0, snapRangeMultiplier: 4 },
11361142
tfs_z: { snapValue: 1.0, snapRangeMultiplier: 4 },
11371143
typical_p: { snapValue: 1.0, snapRangeMultiplier: 4 },

examples/server/public/index.html

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -307,6 +307,8 @@
307307
top_k: 40, // <= 0 to use vocab size
308308
top_p: 0.95, // 1.0 = disabled
309309
min_p: 0.05, // 0 = disabled
310+
xtc_probability: 0.0, // 0 = disabled;
311+
xtc_threshold: 0.1, // > 0.5 disables XTC;
310312
tfs_z: 1.0, // 1.0 = disabled
311313
typical_p: 1.0, // 1.0 = disabled
312314
presence_penalty: 0.0, // 0.0 = disabled
@@ -1013,6 +1015,8 @@
10131015
${FloatField({ label: "Typical P", max: 1.0, min: 0.0, name: "typical_p", step: 0.01, value: params.value.typical_p })}
10141016
${FloatField({ label: "Presence penalty", max: 1.0, min: 0.0, name: "presence_penalty", step: 0.01, value: params.value.presence_penalty })}
10151017
${FloatField({ label: "Frequency penalty", max: 1.0, min: 0.0, name: "frequency_penalty", step: 0.01, value: params.value.frequency_penalty })}
1018+
${FloatField({ label: "XTC probability", max: 1.0, min: 0.0, name: "xtc_probability", step: 0.01, value: params.value.xtc_probability })}
1019+
${FloatField({ label: "XTC threshold", max: 0.5, min: 0.0, name: "xtc_threshold", step: 0.01, value: params.value.xtc_threshold })}
10161020
</fieldset>
10171021
<hr />
10181022
<fieldset class="three">

0 commit comments

Comments
 (0)