Skip to content

Commit 2dd670e

Browse files
committed
vocab : add special infill tokens for CodeLlama
The commit adds the following special tokens for CodeLlama infill:

- `▁<PRE>`
- `▁<SUF>`
- `▁<MID>`

The motivation for this is that the infill example currently suggests CodeLlama as a model to use. But when using this model, the following error is generated:

```console
/llama.cpp-debug/examples/infill/infill.cpp:165: GGML_ASSERT(llama_vocab_fim_pre(vocab) >= 0) failed
Could not attach to process.  If your uid matches the uid of the target
process, check the setting of /proc/sys/kernel/yama/ptrace_scope, or try
again as the root user.  For more details, see /etc/sysctl.d/10-ptrace.conf
ptrace: Operation not permitted.
No stack.
The program is not being run.
305251 Aborted                 (core dumped)
./build/bin/llama-infill -t 10 -ngl 0 -m models/codellama-13b.Q5_K_S.gguf \
  -c 4096 --temp 0.7 --repeat_penalty 1.1 -n 20 \
  --in-prefix "def helloworld():\n    print(\"hell" \
  --in-suffix "\n    print(\"goodbye world\")\n    "
```
1 parent bd6e55b commit 2dd670e

File tree

1 file changed

+3
-0
lines changed

1 file changed

+3
-0
lines changed

src/llama-vocab.cpp

Lines changed: 3 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -1801,6 +1801,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
18011801
|| t.first == "<fim-prefix>"
18021802
|| t.first == "<|fim▁begin|>" // DeepSeek
18031803
|| t.first == "<PRE>"
1804+
|| t.first == "▁<PRE>" // CodeLlama
18041805
) {
18051806
special_fim_pre_id = t.second;
18061807
if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
@@ -1818,6 +1819,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
18181819
|| t.first == "<fim-suffix>"
18191820
|| t.first == "<|fim▁hole|>" // DeepSeek
18201821
|| t.first == "<SUF>"
1822+
|| t.first == "▁<SUF>" // CodeLlama
18211823
) {
18221824
special_fim_suf_id = t.second;
18231825
if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
@@ -1835,6 +1837,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
18351837
|| t.first == "<fim-middle>"
18361838
|| t.first == "<|fim▁end|>" // DeepSeek
18371839
|| t.first == "<MID>"
1840+
|| t.first == "▁<MID>" // CodeLlama
18381841
) {
18391842
special_fim_mid_id = t.second;
18401843
if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {

0 commit comments

Comments (0)