Skip to content

Commit b0298da

Browse files
authored
Merge branch 'ggml-org:master' into mradermacher
2 parents 6f238fa + 971f245 commit b0298da

File tree

2 files changed: +6 additions, −1 deletion

ggml/src/ggml-opencl/ggml-opencl.cpp

Lines changed: 1 addition & 1 deletion
@@ -1521,7 +1521,7 @@ static void ggml_cl2_free(void) {
             info.cmd_complete_duration_ns/1.e6f,
             info.cmd_total_duration_ns/1.e6f,
             info.global_size[0], info.global_size[1], info.global_size[2],
-            info.local_size[0], info.local_size[2], info.local_size[2],
+            info.local_size[0], info.local_size[1], info.local_size[2],
             info.output_size[0], info.output_size[1], info.output_size[2], info.output_size[3]);
        }
        fclose(fperf);

src/llama-vocab.cpp

Lines changed: 5 additions & 0 deletions
@@ -1841,6 +1841,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
         if (false
             || t.first == "<|fim_prefix|>" // Qwen
             || t.first == "<fim-prefix>"
+            || t.first == "<fim_prefix>" // Granite
             || t.first == "<|fim▁begin|>" // DeepSeek
             || t.first == "<PRE>"
             || t.first == "▁<PRE>" // CodeLlama
@@ -1859,6 +1860,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
         if (false
             || t.first == "<|fim_suffix|>" // Qwen
             || t.first == "<fim-suffix>"
+            || t.first == "<fim_suffix>" // Granite
             || t.first == "<|fim▁hole|>" // DeepSeek
             || t.first == "<SUF>"
             || t.first == "▁<SUF>" // CodeLlama
@@ -1877,6 +1879,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
         if (false
             || t.first == "<|fim_middle|>" // Qwen
             || t.first == "<fim-middle>"
+            || t.first == "<fim_middle>" // Granite
             || t.first == "<|fim▁end|>" // DeepSeek
             || t.first == "<MID>"
             || t.first == "▁<MID>" // CodeLlama
@@ -1895,6 +1898,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
         if (false
             || t.first == "<|fim_pad|>" // Qwen
             || t.first == "<fim-pad>"
+            || t.first == "<fim_pad>" // Granite
             || t.first == "<PAD>"
         ) {
             special_fim_pad_id = t.second;
@@ -1913,6 +1917,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
             || t.first == "<|repo_name|>"
             || t.first == "<fim-repo>"
             || t.first == "<REPO>"
+            || t.first == "<reponame>" // Granite
         ) {
             special_fim_rep_id = t.second;
             if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {

0 commit comments

Comments
 (0)