8
8
#include " AddonModelData.h"
9
9
#include " AddonModelLora.h"
10
10
11
- static Napi::Value getNapiToken (const Napi::CallbackInfo& info, llama_model* model , llama_token token) {
11
+ static Napi::Value getNapiToken (const Napi::CallbackInfo& info, const llama_vocab* vocab , llama_token token) {
12
12
if (token < 0 || token == LLAMA_TOKEN_NULL) {
13
13
return Napi::Number::From (info.Env (), -1 );
14
14
}
15
15
16
- auto tokenAttributes = llama_token_get_attr (model , token);
16
+ auto tokenAttributes = llama_vocab_get_attr (vocab , token);
17
17
18
18
if (tokenAttributes & LLAMA_TOKEN_ATTR_UNDEFINED || tokenAttributes & LLAMA_TOKEN_ATTR_UNKNOWN) {
19
19
return Napi::Number::From (info.Env (), -1 );
@@ -22,12 +22,12 @@ static Napi::Value getNapiToken(const Napi::CallbackInfo& info, llama_model* mod
22
22
return Napi::Number::From (info.Env (), token);
23
23
}
24
24
25
- static Napi::Value getNapiControlToken (const Napi::CallbackInfo& info, llama_model* model , llama_token token) {
25
+ static Napi::Value getNapiControlToken (const Napi::CallbackInfo& info, const llama_vocab* vocab , llama_token token) {
26
26
if (token < 0 ) {
27
27
return Napi::Number::From (info.Env (), -1 );
28
28
}
29
29
30
- auto tokenAttributes = llama_token_get_attr (model , token);
30
+ auto tokenAttributes = llama_vocab_get_attr (vocab , token);
31
31
32
32
if (!(tokenAttributes & LLAMA_TOKEN_ATTR_CONTROL) && !(tokenAttributes & LLAMA_TOKEN_ATTR_UNDEFINED)) {
33
33
return Napi::Number::From (info.Env (), -1 );
@@ -93,6 +93,7 @@ class AddonModelLoadModelWorker : public Napi::AsyncWorker {
93
93
void Execute () {
94
94
try {
95
95
model->model = llama_model_load_from_file (model->modelPath .c_str (), model->model_params );
96
+ model->vocab = llama_model_get_vocab (model->model );
96
97
97
98
model->modelLoaded = model->model != nullptr && model->model != NULL ;
98
99
} catch (const std::exception& e) {
@@ -190,7 +191,7 @@ class AddonModelLoadLoraWorker : public Napi::AsyncWorker {
190
191
191
192
void Execute () {
192
193
try {
193
- const auto loraAdapter = llama_lora_adapter_init (modelLora->model ->model , modelLora->loraFilePath .c_str ());
194
+ const auto loraAdapter = llama_adapter_lora_init (modelLora->model ->model , modelLora->loraFilePath .c_str ());
194
195
195
196
if (loraAdapter == nullptr ) {
196
197
SetError (
@@ -213,7 +214,7 @@ class AddonModelLoadLoraWorker : public Napi::AsyncWorker {
213
214
} catch (const std::exception& e) {
214
215
SetError (e.what ());
215
216
} catch (...) {
216
- SetError (" Unknown error when calling \" llama_lora_adapter_init \" " );
217
+ SetError (" Unknown error when calling \" llama_adapter_lora_init \" " );
217
218
}
218
219
}
219
220
void OnOK () {
@@ -426,7 +427,7 @@ Napi::Value AddonModel::Tokenize(const Napi::CallbackInfo& info) {
426
427
std::string text = info[0 ].As <Napi::String>().Utf8Value ();
427
428
bool specialTokens = info[1 ].As <Napi::Boolean>().Value ();
428
429
429
- std::vector<llama_token> tokens = common_tokenize (model , text, false , specialTokens);
430
+ std::vector<llama_token> tokens = common_tokenize (vocab , text, false , specialTokens);
430
431
431
432
Napi::Uint32Array result = Napi::Uint32Array::New (info.Env (), tokens.size ());
432
433
for (size_t i = 0 ; i < tokens.size (); ++i) {
@@ -449,10 +450,10 @@ Napi::Value AddonModel::Detokenize(const Napi::CallbackInfo& info) {
449
450
std::string result;
450
451
result.resize (std::max (result.capacity (), tokens.ElementLength ()));
451
452
452
- int n_chars = llama_detokenize (model , (llama_token*)tokens.Data (), tokens.ElementLength (), &result[0 ], result.size (), false , decodeSpecialTokens);
453
+ int n_chars = llama_detokenize (vocab , (llama_token*)tokens.Data (), tokens.ElementLength (), &result[0 ], result.size (), false , decodeSpecialTokens);
453
454
if (n_chars < 0 ) {
454
455
result.resize (-n_chars);
455
- n_chars = llama_detokenize (model , (llama_token*)tokens.Data (), tokens.ElementLength (), &result[0 ], result.size (), false , decodeSpecialTokens);
456
+ n_chars = llama_detokenize (vocab , (llama_token*)tokens.Data (), tokens.ElementLength (), &result[0 ], result.size (), false , decodeSpecialTokens);
456
457
GGML_ASSERT (n_chars <= result.size ()); // whitespace trimming is performed after per-token detokenization
457
458
}
458
459
@@ -467,7 +468,7 @@ Napi::Value AddonModel::GetTrainContextSize(const Napi::CallbackInfo& info) {
467
468
return info.Env ().Undefined ();
468
469
}
469
470
470
- return Napi::Number::From (info.Env (), llama_n_ctx_train (model));
471
+ return Napi::Number::From (info.Env (), llama_model_n_ctx_train (model));
471
472
}
472
473
473
474
Napi::Value AddonModel::GetEmbeddingVectorSize (const Napi::CallbackInfo& info) {
@@ -476,7 +477,7 @@ Napi::Value AddonModel::GetEmbeddingVectorSize(const Napi::CallbackInfo& info) {
476
477
return info.Env ().Undefined ();
477
478
}
478
479
479
- return Napi::Number::From (info.Env (), llama_n_embd (model));
480
+ return Napi::Number::From (info.Env (), llama_model_n_embd (model));
480
481
}
481
482
482
483
Napi::Value AddonModel::GetTotalSize (const Napi::CallbackInfo& info) {
@@ -515,68 +516,63 @@ Napi::Value AddonModel::TokenBos(const Napi::CallbackInfo& info) {
515
516
return info.Env ().Undefined ();
516
517
}
517
518
518
- auto token = llama_token_bos (model);
519
- if (token == LLAMA_TOKEN_NULL) {
520
- token = llama_token_cls (model);
521
- }
522
-
523
- return getNapiControlToken (info, model, token);
519
+ return getNapiControlToken (info, vocab, llama_vocab_bos (vocab));
524
520
}
525
521
// Resolves the end-of-sequence (EOS) token id from the model's vocabulary.
// getNapiControlToken maps missing / non-control tokens to -1 on the JS side.
Napi::Value AddonModel::TokenEos(const Napi::CallbackInfo& info) {
    if (!disposed) {
        const llama_token eosToken = llama_vocab_eos(vocab);
        return getNapiControlToken(info, vocab, eosToken);
    }

    Napi::Error::New(info.Env(), "Model is disposed").ThrowAsJavaScriptException();
    return info.Env().Undefined();
}
533
529
// Resolves the newline token id from the model's vocabulary.
// getNapiToken maps missing / undefined tokens to -1 on the JS side.
Napi::Value AddonModel::TokenNl(const Napi::CallbackInfo& info) {
    if (disposed) {
        Napi::Error::New(info.Env(), "Model is disposed").ThrowAsJavaScriptException();
        return info.Env().Undefined();
    }

    const llama_token newlineToken = llama_vocab_nl(vocab);
    return getNapiToken(info, vocab, newlineToken);
}
541
537
// Resolves the fill-in-middle (FIM) prefix token id from the vocabulary;
// -1 is returned (via getNapiToken) when the model does not define one.
Napi::Value AddonModel::PrefixToken(const Napi::CallbackInfo& info) {
    if (!disposed) {
        const llama_token fimPrefixToken = llama_vocab_fim_pre(vocab);
        return getNapiToken(info, vocab, fimPrefixToken);
    }

    Napi::Error::New(info.Env(), "Model is disposed").ThrowAsJavaScriptException();
    return info.Env().Undefined();
}
549
545
// Resolves the fill-in-middle (FIM) middle token id from the vocabulary;
// -1 is returned (via getNapiToken) when the model does not define one.
Napi::Value AddonModel::MiddleToken(const Napi::CallbackInfo& info) {
    if (disposed) {
        Napi::Error::New(info.Env(), "Model is disposed").ThrowAsJavaScriptException();
        return info.Env().Undefined();
    }

    const llama_token fimMiddleToken = llama_vocab_fim_mid(vocab);
    return getNapiToken(info, vocab, fimMiddleToken);
}
557
553
// Resolves the fill-in-middle (FIM) suffix token id from the vocabulary;
// -1 is returned (via getNapiToken) when the model does not define one.
Napi::Value AddonModel::SuffixToken(const Napi::CallbackInfo& info) {
    if (!disposed) {
        const llama_token fimSuffixToken = llama_vocab_fim_suf(vocab);
        return getNapiToken(info, vocab, fimSuffixToken);
    }

    Napi::Error::New(info.Env(), "Model is disposed").ThrowAsJavaScriptException();
    return info.Env().Undefined();
}
565
561
// Resolves the end-of-turn (EOT) token id from the model's vocabulary,
// mapped to -1 by getNapiToken when absent or undefined.
Napi::Value AddonModel::EotToken(const Napi::CallbackInfo& info) {
    if (disposed) {
        Napi::Error::New(info.Env(), "Model is disposed").ThrowAsJavaScriptException();
        return info.Env().Undefined();
    }

    const llama_token endOfTurnToken = llama_vocab_eot(vocab);
    return getNapiToken(info, vocab, endOfTurnToken);
}
573
569
// Resolves the separator (SEP) token id from the model's vocabulary,
// mapped to -1 by getNapiToken when absent or undefined.
Napi::Value AddonModel::SepToken(const Napi::CallbackInfo& info) {
    if (!disposed) {
        const llama_token separatorToken = llama_vocab_sep(vocab);
        return getNapiToken(info, vocab, separatorToken);
    }

    Napi::Error::New(info.Env(), "Model is disposed").ThrowAsJavaScriptException();
    return info.Env().Undefined();
}
581
577
Napi::Value AddonModel::GetTokenString (const Napi::CallbackInfo& info) {
582
578
if (disposed) {
@@ -587,7 +583,7 @@ Napi::Value AddonModel::GetTokenString(const Napi::CallbackInfo& info) {
587
583
int token = info[0 ].As <Napi::Number>().Int32Value ();
588
584
std::stringstream ss;
589
585
590
- const char * str = llama_token_get_text (model , token);
586
+ const char * str = llama_vocab_get_text (vocab , token);
591
587
if (str == nullptr ) {
592
588
return info.Env ().Undefined ();
593
589
}
@@ -608,7 +604,7 @@ Napi::Value AddonModel::GetTokenAttributes(const Napi::CallbackInfo& info) {
608
604
}
609
605
610
606
int token = info[0 ].As <Napi::Number>().Int32Value ();
611
- auto tokenAttributes = llama_token_get_attr (model , token);
607
+ auto tokenAttributes = llama_vocab_get_attr (vocab , token);
612
608
613
609
return Napi::Number::From (info.Env (), int32_t (tokenAttributes));
614
610
}
@@ -624,25 +620,25 @@ Napi::Value AddonModel::IsEogToken(const Napi::CallbackInfo& info) {
624
620
625
621
int token = info[0 ].As <Napi::Number>().Int32Value ();
626
622
627
- return Napi::Boolean::New (info.Env (), llama_token_is_eog (model , token));
623
+ return Napi::Boolean::New (info.Env (), llama_vocab_is_eog (vocab , token));
628
624
}
629
625
// Exposes the vocabulary type enum (result of llama_vocab_type) to JS as an int32.
Napi::Value AddonModel::GetVocabularyType(const Napi::CallbackInfo& info) {
    if (disposed) {
        Napi::Error::New(info.Env(), "Model is disposed").ThrowAsJavaScriptException();
        return info.Env().Undefined();
    }

    return Napi::Number::From(info.Env(), int32_t(llama_vocab_type(vocab)));
}
639
635
// Reports whether the tokenizer should prepend a BOS token (llama_vocab_get_add_bos).
// NOTE(review): unlike every sibling accessor, this method previously had no
// `disposed` guard and would read `vocab` after the model was freed; the guard
// below brings it in line with the other token accessors.
Napi::Value AddonModel::ShouldPrependBosToken(const Napi::CallbackInfo& info) {
    if (disposed) {
        Napi::Error::New(info.Env(), "Model is disposed").ThrowAsJavaScriptException();
        return info.Env().Undefined();
    }

    const bool addBos = llama_vocab_get_add_bos(vocab);

    return Napi::Boolean::New(info.Env(), addBos);
}
644
640
// Reports whether the tokenizer should append an EOS token (llama_vocab_get_add_eos).
// NOTE(review): unlike every sibling accessor, this method previously had no
// `disposed` guard and would read `vocab` after the model was freed; the guard
// below brings it in line with the other token accessors.
Napi::Value AddonModel::ShouldAppendEosToken(const Napi::CallbackInfo& info) {
    if (disposed) {
        Napi::Error::New(info.Env(), "Model is disposed").ThrowAsJavaScriptException();
        return info.Env().Undefined();
    }

    const bool addEos = llama_vocab_get_add_eos(vocab);

    return Napi::Boolean::New(info.Env(), addEos);
}
0 commit comments