@@ -10,7 +10,10 @@ use crate::llama_backend::LlamaBackend;
 use crate::model::params::LlamaModelParams;
 use crate::token::LlamaToken;
 use crate::token_type::LlamaTokenType;
-use crate::{LlamaContextLoadError, LlamaModelLoadError, StringToTokenError, TokenToStringError};
+use crate::{
+    ChatTemplateError, LlamaContextLoadError, LlamaModelLoadError, StringToTokenError,
+    TokenToStringError,
+};
 
 pub mod params;
 
@@ -274,6 +277,43 @@ impl LlamaModel {
         unsafe { llama_cpp_sys_2::llama_n_embd(self.model.as_ptr()) }
     }
 
+    /// get the chat template from the model.
+    ///
+    /// ```ignore
+    /// let chat_template = model.get_chat_template()?;
+    /// ```
+    ///
+    /// # Errors
+    ///
+    /// Returns `ChatTemplateError::NullReturn` if the model has no
+    /// `tokenizer.chat_template` metadata or the value is not valid UTF-8.
+    pub fn get_chat_template(&self) -> Result<String, ChatTemplateError> {
+        // The longest known template is about 1200 bytes (from llama.cpp),
+        // so a 2048-byte buffer leaves comfortable headroom.
+        let mut buf = vec![0u8; 2048];
+        let chat_name = match CString::new("tokenizer.chat_template") {
+            Ok(c) => c,
+            Err(_) => return Err(ChatTemplateError::NullReturn),
+        };
+        let written = unsafe {
+            llama_cpp_sys_2::llama_model_meta_val_str(
+                self.model.as_ptr(),
+                chat_name.as_ptr(),
+                buf.as_mut_ptr().cast(),
+                buf.len(),
+            )
+        };
+        if written < 0 {
+            return Err(ChatTemplateError::NullReturn);
+        }
+        // llama.cpp NUL-terminates the value it writes into the buffer.
+        let len = buf.iter().position(|&b| b == 0).unwrap_or(buf.len());
+        match std::str::from_utf8(&buf[..len]) {
+            Ok(s) => Ok(s.to_string()),
+            Err(_) => Err(ChatTemplateError::NullReturn),
+        }
+    }
+
     /// loads a model from a file.
     ///
     /// # Errors
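For context, a minimal sketch of how a caller might use the new accessor; it assumes the crate's existing `LlamaBackend::init` and `LlamaModel::load_from_file` entry points as they appear elsewhere in this crate, and `model.gguf` is a placeholder path:

```rust
use llama_cpp_2::llama_backend::LlamaBackend;
use llama_cpp_2::model::params::LlamaModelParams;
use llama_cpp_2::model::LlamaModel;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // One-time backend initialization for llama.cpp.
    let backend = LlamaBackend::init()?;

    // Default model parameters; "model.gguf" is a placeholder path.
    let params = LlamaModelParams::default();
    let model = LlamaModel::load_from_file(&backend, "model.gguf", &params)?;

    // The new accessor reads the `tokenizer.chat_template` metadata key
    // and fails with `ChatTemplateError` if it is missing or malformed.
    let chat_template = model.get_chat_template()?;
    println!("{chat_template}");
    Ok(())
}
```

The fixed-size 2048-byte buffer mirrors the comment in the patch; if the underlying `llama_model_meta_val_str` reports the full value length snprintf-style, a more robust variant could call it twice to size the buffer exactly.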