We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 21ee380 commit 95e433c — Copy full SHA for 95e433c
src/llama.cpp
@@ -6455,7 +6455,7 @@ static void llm_load_vocab(
6455
} else if (vocab.type == LLAMA_VOCAB_TYPE_WPM) {
6456
vocab.linefeed_id = vocab.special_pad_id;
6457
} else if (vocab.type == LLAMA_VOCAB_TYPE_RWKV) {
6458
- const std::vector<int> ids = llama_tokenize_internal(model.vocab, "\n", false);
+ const std::vector<int> ids = llama_tokenize_internal(vocab, "\n", false);
6459
GGML_ASSERT(!ids.empty() && "model vocab missing newline token");
6460
vocab.linefeed_id = ids[0];
6461
} else {
0 commit comments