@@ -5,17 +5,13 @@ import type { OllamaChatTemplateMapEntry } from "./types";
55
66/** 
77 * Skipped these models due to error: 
8-  * - library/nous-hermes2:34b 
9-  * - library/stablelm2:12b 
10-  * - library/deepseek-v2:16b 
11-  * - library/wizard-math:13b 
12-  * - library/neural-chat:7b 
13-  * - library/stable-code:3b 
14-  * - library/wizard-math:70b 
15-  * - library/dolphin-phi:2.7b 
16-  * - library/firefunction-v2:70b 
17-  * - library/granite3.2:8b 
18-  * - library/r1-1776:671b 
8+  * - library/gemma3:1b 
9+  * - library/qwen2.5:3b 
10+  * - library/qwen:7b 
11+  * - library/llama3.2-vision:11b 
12+  * - library/starcoder2:3b 
13+  * - library/everythinglm:13b 
14+  * - library/falcon3:7b 
1915 */ 
2016
2117export  const  OLLAMA_CHAT_TEMPLATE_MAPPING : OllamaChatTemplateMapEntry [ ]  =  [ 
@@ -323,6 +319,36 @@ export const OLLAMA_CHAT_TEMPLATE_MAPPING: OllamaChatTemplateMapEntry[] = [
323319			} , 
324320		} , 
325321	} , 
322+ 	{ 
323+ 		model : "library/exaone-deep:2.4b" , 
324+ 		gguf : "{% for message in messages %}{% if loop.first and message['role'] != 'system' %}{{ '[|system|][|endofturn|]\\n' }}{% endif %}{% set content = message['content'] %}{% if '</thought>' in content %}{% set content = content.split('</thought>')[-1].lstrip('\\n') %}{% endif %}{{ '[|' + message['role'] + '|]' + content }}{% if not message['role'] == 'user' %}{{ '[|endofturn|]' }}{% endif %}{% if not loop.last %}{{ '\\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '\\n[|assistant|]<thought>\\n' }}{% endif %}" , 
325+ 		ollama : { 
326+ 			template :
327+ 				'{{- range $i, $_ := .Messages }}\n{{- $last := eq (len (slice $.Messages $i)) 1 -}}\n{{ if eq .Role "system" }}[|system|]{{ .Content }}[|endofturn|]\n{{ continue }}\n{{ else if eq .Role "user" }}[|user|]{{ .Content }}\n{{ else if eq .Role "assistant" }}[|assistant|]{{ .Content }}[|endofturn|]\n{{ end }}\n{{- if and (ne .Role "assistant") $last }}[|assistant|]{{ end }}\n{{- end -}}' , 
328+ 			tokens : [ "<thought>" ] , 
329+ 			params : { 
330+ 				repeat_penalty : 1 , 
331+ 				stop : [ "[|endofturn|]" ] , 
332+ 				temperature : 0.6 , 
333+ 				top_p : 0.95 , 
334+ 			} , 
335+ 		} , 
336+ 	} , 
337+ 	{ 
338+ 		model : "library/exaone-deep:32b" , 
339+ 		gguf : "{% for message in messages %}{% if loop.first and message['role'] != 'system' %}{{ '[|system|][|endofturn|]\n' }}{% endif %}{% set content = message['content'] %}{% if '</thought>' in content %}{% set content = content.split('</thought>')[-1].lstrip('\\n') %}{% endif %}{{ '[|' + message['role'] + '|]' + content }}{% if not message['role'] == 'user' %}{{ '[|endofturn|]' }}{% endif %}{% if not loop.last %}{{ '\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '\n[|assistant|]<thought>\n' }}{% endif %}" , 
340+ 		ollama : { 
341+ 			template :
342+ 				'{{- range $i, $_ := .Messages }}\n{{- $last := eq (len (slice $.Messages $i)) 1 -}}\n{{ if eq .Role "system" }}[|system|]{{ .Content }}[|endofturn|]\n{{ continue }}\n{{ else if eq .Role "user" }}[|user|]{{ .Content }}\n{{ else if eq .Role "assistant" }}[|assistant|]{{ .Content }}[|endofturn|]\n{{ end }}\n{{- if and (ne .Role "assistant") $last }}[|assistant|]{{ end }}\n{{- end -}}' , 
343+ 			tokens : [ "<thought>" ] , 
344+ 			params : { 
345+ 				repeat_penalty : 1 , 
346+ 				stop : [ "[|endofturn|]" ] , 
347+ 				temperature : 0.6 , 
348+ 				top_p : 0.95 , 
349+ 			} , 
350+ 		} , 
351+ 	} , 
326352	{ 
327353		model : "library/exaone3.5:7.8b" , 
328354		gguf : "{% for message in messages %}{% if loop.first and message['role'] != 'system' %}{{ '[|system|][|endofturn|]\n' }}{% endif %}{{ '[|' + message['role'] + '|]' + message['content'] }}{% if message['role'] == 'user' %}{{ '\n' }}{% else %}{{ '[|endofturn|]\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '[|assistant|]' }}{% endif %}" , 