Skip to content

Commit f812d1f

Browse files
committed
fmt and clippy
1 parent 7c0d395 commit f812d1f

File tree

3 files changed

+19
-21
lines changed

3 files changed

+19
-21
lines changed

candle-examples/examples/smollm3/main.rs

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -9,8 +9,8 @@ use clap::{Parser, ValueEnum};
99
use std::io::Write;
1010

1111
use candle::{DType, Device, Tensor};
12+
use candle_examples::chat_template::{ChatTemplate, ChatTemplateOptions, Message};
1213
use candle_examples::token_output_stream::TokenOutputStream;
13-
use candle_examples::chat_template::{ChatTemplate, Message, ChatTemplateOptions};
1414

1515
use candle_nn::VarBuilder;
1616
use candle_transformers::generation::{LogitsProcessor, Sampling};
@@ -321,7 +321,11 @@ fn format_prompt(prompt: &str, use_chat_template: bool, enable_thinking: bool) -
321321
// Build system message with SmolLM3's metadata format
322322
let now = chrono::Local::now();
323323
let today_date = now.format("%d %B %Y").to_string();
324-
let reasoning_mode = if enable_thinking { "/think" } else { "/no_think" };
324+
let reasoning_mode = if enable_thinking {
325+
"/think"
326+
} else {
327+
"/no_think"
328+
};
325329

326330
let system_content = format!(
327331
"## Metadata\n\n\
@@ -333,10 +337,7 @@ fn format_prompt(prompt: &str, use_chat_template: bool, enable_thinking: bool) -
333337
today_date, reasoning_mode
334338
);
335339

336-
let messages = vec![
337-
Message::system(system_content),
338-
Message::user(prompt),
339-
];
340+
let messages = vec![Message::system(system_content), Message::user(prompt)];
340341

341342
let options = if enable_thinking {
342343
ChatTemplateOptions::for_generation().with_thinking()

candle-examples/src/chat_template.rs

Lines changed: 10 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -190,8 +190,8 @@ impl ChatTemplate {
190190

191191
/// Load chat template from tokenizer_config.json content
192192
pub fn from_tokenizer_config_str(json: &str) -> Result<Self, ChatTemplateError> {
193-
let config: TokenConfig = serde_json::from_str(json)
194-
.map_err(|e| ChatTemplateError::ParseError(e.to_string()))?;
193+
let config: TokenConfig =
194+
serde_json::from_str(json).map_err(|e| ChatTemplateError::ParseError(e.to_string()))?;
195195

196196
let template = match config.chat_template {
197197
Some(ChatTemplateConfig::Single(t)) => t,
@@ -202,7 +202,7 @@ impl ChatTemplate {
202202
.find(|t| t.name == "default")
203203
.or_else(|| templates.first())
204204
.map(|t| t.template.clone())
205-
.ok_or_else(|| ChatTemplateError::NoTemplate)?
205+
.ok_or(ChatTemplateError::NoTemplate)?
206206
}
207207
None => return Err(ChatTemplateError::NoTemplate),
208208
};
@@ -460,10 +460,7 @@ mod tests {
460460
#[test]
461461
fn test_chatml_basic() {
462462
let template = ChatTemplate::chatml();
463-
let messages = vec![
464-
Message::system("You are helpful."),
465-
Message::user("Hello"),
466-
];
463+
let messages = vec![Message::system("You are helpful."), Message::user("Hello")];
467464

468465
let result = template.apply_for_generation(&messages).unwrap();
469466

@@ -493,7 +490,10 @@ mod tests {
493490
let messages = vec![Message::user("Think about this")];
494491

495492
let result = template
496-
.apply(&messages, &ChatTemplateOptions::for_generation().with_thinking())
493+
.apply(
494+
&messages,
495+
&ChatTemplateOptions::for_generation().with_thinking(),
496+
)
497497
.unwrap();
498498

499499
assert!(result.contains("<think>"));
@@ -502,10 +502,7 @@ mod tests {
502502
#[test]
503503
fn test_llama3_format() {
504504
let template = ChatTemplate::llama3();
505-
let messages = vec![
506-
Message::system("You are helpful."),
507-
Message::user("Hello"),
508-
];
505+
let messages = vec![Message::system("You are helpful."), Message::user("Hello")];
509506

510507
let result = template.apply_for_generation(&messages).unwrap();
511508

@@ -529,4 +526,4 @@ mod tests {
529526

530527
assert!(result.contains("user: test"));
531528
}
532-
}
529+
}

candle-transformers/src/models/smol/quantized_smollm3.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -57,7 +57,7 @@ fn reconstruct_qk_weights(gguf_weight: &Tensor, _num_heads: usize) -> Result<Ten
5757
heads.push(Tensor::stack(&head_odd, 0)?);
5858
}
5959

60-
Ok(Tensor::cat(&heads, 0)?)
60+
Tensor::cat(&heads, 0)
6161
}
6262

6363
#[derive(Debug, Clone)]
@@ -381,7 +381,7 @@ impl QuantizedDecoderLayer {
381381
layer_idx: usize,
382382
rotary_emb: Option<Arc<RotaryEmbedding>>,
383383
) -> Result<Self> {
384-
let attn_vb = vb.pp(&format!("blk.{layer_idx}"));
384+
let attn_vb = vb.pp(format!("blk.{layer_idx}"));
385385

386386
Ok(Self {
387387
self_attn: QuantizedAttention::new(attn_vb.clone(), cfg, layer_idx, rotary_emb)?,

0 commit comments

Comments (0)