+{
+  "conversations": {
+    "deleteConfirm": "Are you sure you want to delete this conversation?",
+    "deleteBtn": "Delete"
+  },
+  "config": {
+    "meaning": {
+      "apiKey": "Set the API key if you are using the --api-key option for the server.",
+      "systemMessage": "The starting message that defines how the model should behave.",
+      "samplers": "The order in which samplers are applied, in simplified notation. Default is 'dkypmxt': dry -> top_k -> typ_p -> top_p -> min_p -> xtc -> temperature.",
+      "temperature": "Controls the randomness of the generated text by affecting the probability distribution of the output tokens. Higher = more random, lower = more focused.",
+      "dynatemp_range": "Addon for the temperature sampler. The added value to the range of dynamic temperature, which adjusts probabilities by the entropy of tokens.",
+      "dynatemp_exponent": "Addon for the temperature sampler. Smooths out the probability redistribution based on the most probable token.",
+      "top_k": "Keeps only the k most probable tokens.",
+      "top_p": "Limits tokens to those that together have a cumulative probability of at least p.",
+      "min_p": "Limits tokens based on the minimum probability for a token to be considered, relative to the probability of the most likely token.",
+      "xtc_probability": "XTC sampler cuts out top tokens; this parameter controls the chance of cutting tokens at all. 0 disables XTC.",
+      "xtc_threshold": "XTC sampler cuts out top tokens; this parameter controls the token probability that is required to cut that token.",
+      "typical_p": "Sorts and limits tokens based on the difference between log-probability and entropy.",
+      "repeat_last_n": "The number of most recent tokens to consider when penalizing repetition.",
+      "repeat_penalty": "Controls the repetition of token sequences in the generated text.",
+      "presence_penalty": "Penalizes tokens based on whether they already appear in the output.",
+      "frequency_penalty": "Penalizes tokens based on how often they appear in the output.",
+      "dry_multiplier": "DRY sampling reduces repetition in generated text even across long contexts. This parameter sets the DRY sampling multiplier.",
+      "dry_base": "DRY sampling reduces repetition in generated text even across long contexts. This parameter sets the DRY sampling base value.",
+      "dry_allowed_length": "DRY sampling reduces repetition in generated text even across long contexts. This parameter sets the allowed length for DRY sampling.",
+      "dry_penalty_last_n": "DRY sampling reduces repetition in generated text even across long contexts. This parameter applies the DRY penalty to the last n tokens.",
+      "max_tokens": "The maximum number of tokens per output."
+    }
+  }
+}
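For reference, the keys under `config.meaning` describe sampling parameters that llama-server also accepts under the same names on its `/completion` endpoint (with `n_predict` as the native name for the `max_tokens` limit). Below is a minimal request sketch; the prompt and values are illustrative assumptions rather than recommended defaults, and the `samplers` array spells out the 'dkypmxt' order described above:

```json
{
  "prompt": "Write a haiku about autumn.",
  "n_predict": 128,
  "samplers": ["dry", "top_k", "typ_p", "top_p", "min_p", "xtc", "temperature"],
  "temperature": 0.8,
  "top_k": 40,
  "top_p": 0.95,
  "min_p": 0.05,
  "xtc_probability": 0.0,
  "typical_p": 1.0,
  "repeat_last_n": 64,
  "repeat_penalty": 1.1,
  "presence_penalty": 0.0,
  "frequency_penalty": 0.0,
  "dry_multiplier": 0.0
}
```

Such a body can be sent with, for example, `curl -X POST -H "Content-Type: application/json" -d @request.json http://localhost:8080/completion`, assuming the server is running on its default port.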