Commit 1211095

fix: remove tfs_z
ref: ggml-org/llama.cpp#10071
1 parent: 992a4d6
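Tail-free sampling (`tfs_z`) was removed upstream in llama.cpp (ggml-org/llama.cpp#10071), so this commit drops the parameter end to end: from the Android JNI bridge, the iOS bridge, the TypeScript types, and the example app. For callers, migrating is just a matter of no longer passing the option. A minimal sketch, assuming llama.rn's usual `initLlama`/`completion` entry points (the model path and prompt below are placeholders):

```ts
import { initLlama } from 'llama.rn'

async function run() {
  // Placeholder path; use a real GGUF model on the device.
  const context = await initLlama({ model: '/path/to/model.gguf' })

  const result = await context.completion({
    prompt: 'Hello',
    temperature: 0.7,
    top_k: 40,
    top_p: 0.5,
    // tfs_z: 1.0, // removed by this commit; tail-free sampling no longer exists upstream
    typical_p: 1.0,
  })
  console.log(result.text)
}
```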

File tree

4 files changed: 3 additions, 8 deletions

android/src/main/jni.cpp

Lines changed: 3 additions & 5 deletions. (The paired blank `-`/`+` lines at 179, 547, and 644 show no visible content in the rendered diff and appear to be whitespace-only edits.)

```diff
@@ -176,7 +176,7 @@ Java_com_rnllama_LlamaContext_initContext(
     defaultParams.cpuparams.n_threads = n_threads > 0 ? n_threads : default_n_threads;
 
     defaultParams.n_gpu_layers = n_gpu_layers;
-
+
     defaultParams.use_mlock = use_mlock;
     defaultParams.use_mmap = use_mmap;
 
@@ -380,7 +380,6 @@ Java_com_rnllama_LlamaContext_doCompletion(
     jfloat min_p,
     jfloat xtc_threshold,
     jfloat xtc_probability,
-    jfloat tfs_z,
     jfloat typical_p,
     jint seed,
     jobjectArray stop,
@@ -419,7 +418,6 @@ Java_com_rnllama_LlamaContext_doCompletion(
     sparams.top_k = top_k;
     sparams.top_p = top_p;
     sparams.min_p = min_p;
-    sparams.tfs_z = tfs_z;
     sparams.typ_p = typical_p;
     sparams.n_probs = n_probs;
     sparams.grammar = env->GetStringUTFChars(grammar, nullptr);
@@ -546,7 +544,7 @@ Java_com_rnllama_LlamaContext_doCompletion(
     putInt(env, result, "tokens_cached", llama->n_past);
 
     const auto timings_token = llama_perf_context(llama -> ctx);
-
+
     auto timingsResult = createWriteableMap(env);
     putInt(env, timingsResult, "prompt_n", timings_token.n_p_eval);
     putInt(env, timingsResult, "prompt_ms", timings_token.t_p_eval_ms);
@@ -643,7 +641,7 @@ Java_com_rnllama_LlamaContext_embedding(
     llama->rewind();
 
     llama_perf_context_reset(llama->ctx);
-
+
     llama->params.prompt = text_chars;
 
     llama->params.n_predict = 0;
```
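Unrelated to `tfs_z` itself, the context lines in the fourth hunk show how completion timings are collected via `llama_perf_context` and returned to JS as a map with keys such as `prompt_n` and `prompt_ms`. A hedged sketch of consuming that shape from TypeScript (the type below is an assumption reconstructed from the keys written in the hunk, not a type exported by the library):

```ts
// Assumed result shape, based on the keys written in the JNI hunk above.
type CompletionTimings = {
  prompt_n: number  // tokens evaluated for the prompt
  prompt_ms: number // time spent on prompt evaluation, in milliseconds
}

// Derive prompt throughput; guard against a zero duration.
function promptTokensPerSecond(t: CompletionTimings): number {
  return t.prompt_ms > 0 ? (t.prompt_n / t.prompt_ms) * 1000 : 0
}
```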

example/src/App.tsx

Lines changed: 0 additions & 1 deletion

```diff
@@ -353,7 +353,6 @@ export default function App() {
     temperature: 0.7,
     top_k: 40, // <= 0 to use vocab size
     top_p: 0.5, // 1.0 = disabled
-    tfs_z: 1.0, // 1.0 = disabled
     typical_p: 1.0, // 1.0 = disabled
     penalty_last_n: 256, // 0 = disable penalty, -1 = context size
     penalty_repeat: 1.18, // 1.0 = disabled
```
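After this change, the example app's sampling block reads as below. Values are copied from the context lines above; the surrounding options of the real example are omitted here:

```ts
// Excerpt of the example app's completion options after the removal.
const samplingParams = {
  temperature: 0.7,
  top_k: 40, // <= 0 to use vocab size
  top_p: 0.5, // 1.0 = disabled
  typical_p: 1.0, // 1.0 = disabled
  penalty_last_n: 256, // 0 = disable penalty, -1 = context size
  penalty_repeat: 1.18, // 1.0 = disabled
}
```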

ios/RNLlamaContext.mm

Lines changed: 0 additions & 1 deletion

```diff
@@ -209,7 +209,6 @@ - (NSDictionary *)completion:(NSDictionary *)params
     if (params[@"min_p"]) sparams.min_p = [params[@"min_p"] doubleValue];
     if (params[@"xtc_threshold"]) sparams.xtc_threshold = [params[@"xtc_threshold"] doubleValue];
     if (params[@"xtc_probability"]) sparams.xtc_probability = [params[@"xtc_probability"] doubleValue];
-    if (params[@"tfs_z"]) sparams.tfs_z = [params[@"tfs_z"] doubleValue];
     if (params[@"typical_p"]) sparams.typ_p = [params[@"typical_p"] doubleValue];
 
     if (params[@"grammar"]) {
```

src/NativeRNLlama.ts

Lines changed: 0 additions & 1 deletion

```diff
@@ -37,7 +37,6 @@ export type NativeCompletionParams = {
   min_p?: number
   xtc_threshold?: number
   xtc_probability?: number
-  tfs_z?: number
   typical_p?: number
   temperature?: number // -> temp
   penalty_last_n?: number
```
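With `tfs_z` gone from `NativeCompletionParams`, a persisted settings object that still carries the key may be flagged by TypeScript's excess-property checks when passed as a literal, and is otherwise silently ignored by the native layers (which, as the iOS hunk shows, only read keys they know). A hypothetical migration helper, not part of this commit, for scrubbing legacy configs:

```ts
// Hypothetical helper (not part of this commit): drop the removed key
// from any persisted/legacy completion config before passing it on.
type LegacyParams = { tfs_z?: number } & Record<string, unknown>

function stripRemovedSamplers<T extends LegacyParams>(params: T): Omit<T, 'tfs_z'> {
  const { tfs_z: _removed, ...rest } = params
  return rest
}

// Usage: stripRemovedSamplers({ top_p: 0.5, tfs_z: 1.0 }) -> { top_p: 0.5 }
```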
