Commit 526ab5a

integrate new chat template for smollm3 example

1 parent 9621cbf


candle-examples/examples/smollm3/main.rs (32 additions, 36 deletions)
@@ -10,6 +10,8 @@ use std::io::Write;
 
 use candle::{DType, Device, Tensor};
 use candle_examples::token_output_stream::TokenOutputStream;
+use candle_examples::chat_template::{ChatTemplate, Message, ChatTemplateOptions};
+
 use candle_nn::VarBuilder;
 use candle_transformers::generation::{LogitsProcessor, Sampling};
 use hf_hub::{api::sync::Api, Repo, RepoType};
@@ -310,45 +312,39 @@ fn load_full_model(args: &Args, device: &Device) -> Result<SmolLM3Model>
 // ==================== Text Generation ====================
 
 fn format_prompt(prompt: &str, use_chat_template: bool, enable_thinking: bool) -> String {
-    if use_chat_template {
-        // Generate current date dynamically
-        let now = chrono::Local::now();
-        let today_date = now.format("%d %B %Y").to_string();
-
-        // Set reasoning mode based on thinking flag
-        let reasoning_mode = if enable_thinking {
-            "/think"
-        } else {
-            "/no_think"
-        };
+    if !use_chat_template {
+        return prompt.to_string();
+    }
 
-        // Build the assistant start with or without thinking tags
-        let assistant_start = if enable_thinking {
-            "<|im_start|>assistant\n<think>\n" // Open for reasoning
-        } else {
-            "<|im_start|>assistant\n<think>\n\n</think>\n" // Empty = skip reasoning
-        };
+    let template = ChatTemplate::chatml_with_thinking();
+
+    // Build system message with SmolLM3's metadata format
+    let now = chrono::Local::now();
+    let today_date = now.format("%d %B %Y").to_string();
+    let reasoning_mode = if enable_thinking { "/think" } else { "/no_think" };
+
+    let system_content = format!(
+        "## Metadata\n\n\
+         Knowledge Cutoff Date: June 2025\n\
+         Today Date: {}\n\
+         Reasoning Mode: {}\n\n\
+         ## Custom Instructions\n\n\
+         You are a helpful AI assistant named SmolLM, trained by Hugging Face.",
+        today_date, reasoning_mode
+    );
+
+    let messages = vec![
+        Message::system(system_content),
+        Message::user(prompt),
+    ];
 
-        format!(
-            "<|im_start|>system\n\
-             ## Metadata\n\
-             \n\
-             Knowledge Cutoff Date: June 2025\n\
-             Today Date: {}\n\
-             Reasoning Mode: {}\n\
-             \n\
-             ## Custom Instructions\n\
-             \n\
-             You are a helpful AI assistant named SmolLM, trained by Hugging Face.\n\
-             \n\
-             <|im_start|>user\n\
-             {}<|im_end|>\n\
-             {}",
-            today_date, reasoning_mode, prompt, assistant_start
-        )
+    let options = if enable_thinking {
+        ChatTemplateOptions::for_generation().with_thinking()
     } else {
-        prompt.to_string()
-    }
+        ChatTemplateOptions::for_generation()
+    };
+
+    template.apply(&messages, &options).unwrap()
 }
 
 fn get_eos_token(tokenizer: &Tokenizer, config: &ModelConfig) -> u32 {
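
For reference, a minimal sketch of driving the new chat-template path directly, using only the calls that appear in this diff (ChatTemplate::chatml_with_thinking, Message::system, Message::user, ChatTemplateOptions::for_generation, with_thinking, and ChatTemplate::apply). It assumes Message::system accepts a string slice the same way Message::user does above; the exact rendered prompt depends on the ChatML template shipped in candle_examples::chat_template.

use candle_examples::chat_template::{ChatTemplate, Message, ChatTemplateOptions};

fn main() {
    // Same template constructor the commit switches format_prompt() to.
    let template = ChatTemplate::chatml_with_thinking();

    // Illustrative conversation; the real example builds the system message
    // from SmolLM3's metadata block (knowledge cutoff, date, reasoning mode).
    let messages = vec![
        Message::system("You are a helpful AI assistant named SmolLM, trained by Hugging Face."),
        Message::user("What is the capital of France?"),
    ];

    // with_thinking() should leave the assistant turn open for a <think>
    // block, replacing the hand-built "<|im_start|>assistant\n<think>\n"
    // suffix that this commit deletes.
    let options = ChatTemplateOptions::for_generation().with_thinking();

    let prompt = template.apply(&messages, &options).unwrap();
    println!("{prompt}");
}

With thinking disabled, dropping with_thinking() presumably mirrors the old empty "<think>\n\n</think>" block that told the model to skip reasoning.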
