Commit b5cb56b

Clippy

Signed-off-by: Dennis Keck <[email protected]>
1 parent a40ed28

File tree: 2 files changed (+57 −60)

examples/mtmd/src/mtmd.rs (13 additions, 8 deletions)
@@ -10,7 +10,7 @@ use llama_cpp_2::context::params::LlamaContextParams;
 use llama_cpp_2::context::LlamaContext;
 use llama_cpp_2::llama_batch::LlamaBatch;
 use llama_cpp_2::model::params::LlamaModelParams;
-use llama_cpp_2::mtmd::*;
+use llama_cpp_2::mtmd::{MtmdBitmap, MtmdBitmapError, MtmdContext, MtmdContextParams, MtmdInputText};
 
 use llama_cpp_2::llama_backend::LlamaBackend;
 use llama_cpp_2::model::{LlamaChatMessage, LlamaChatTemplate, LlamaModel, Special};
@@ -83,6 +83,8 @@ pub struct MtmdCliContext {
 
 impl MtmdCliContext {
     /// Creates a new MTMD CLI context
+    ///
+    /// # Errors
     pub fn new(
         params: &MtmdCliParams,
         model: &LlamaModel,
@@ -101,11 +103,11 @@ impl MtmdCliContext {
             )?,
         };
 
-        let mtmd_ctx = MtmdContext::init_from_file(&params.mmproj_path, model, mtmd_params)?;
+        let mtmd_ctx = MtmdContext::init_from_file(&params.mmproj_path, model, &mtmd_params)?;
 
         let chat_template = model
             .chat_template(params.chat_template.as_deref())
-            .map_err(|e| format!("Failed to get chat template: {}", e))?;
+            .map_err(|e| format!("Failed to get chat template: {e}"))?;
 
         let batch = LlamaBatch::new(params.n_tokens, 1);
 
@@ -120,13 +122,15 @@ impl MtmdCliContext {
     }
 
     /// Loads media (image or audio) from the specified file path
+    /// # Errors
     pub fn load_media(&mut self, path: &str) -> Result<(), MtmdBitmapError> {
         let bitmap = MtmdBitmap::from_file(&self.mtmd_ctx, path)?;
         self.bitmaps.push(bitmap);
         Ok(())
     }
 
     /// Evaluates a chat message, tokenizing and processing it through the model
+    /// # Errors
     pub fn eval_message(
         &mut self,
         model: &LlamaModel,
@@ -161,11 +165,12 @@ impl MtmdCliContext {
         // Clear bitmaps after tokenization
         self.bitmaps.clear();
 
-        self.n_past = chunks.eval_chunks(&self.mtmd_ctx, &context, 0, 0, 1, true)?;
+        self.n_past = chunks.eval_chunks(&self.mtmd_ctx, context, 0, 0, 1, true)?;
         Ok(())
     }
 
     /// Generates a response by sampling tokens from the model
+    /// # Errors
     pub fn generate_response(
         &mut self,
         model: &LlamaModel,
@@ -190,7 +195,7 @@ impl MtmdCliContext {
 
             // Print token
             let piece = model.token_to_str(token, Special::Tokenize)?;
-            print!("{}", piece);
+            print!("{piece}");
             io::stdout().flush()?;
 
             // Prepare next batch
@@ -223,7 +228,7 @@ fn run_single_turn(
 
     // Load media files
     for image_path in &params.images {
-        println!("Loading image: {}", image_path);
+        println!("Loading image: {image_path}");
        ctx.load_media(image_path)?;
     }
     for audio_path in &params.audio {
@@ -233,7 +238,7 @@ fn run_single_turn(
     // Create user message
     let msg = LlamaChatMessage::new("user".to_string(), prompt)?;
 
-    println!("Evaluating message: {:?}", msg);
+    println!("Evaluating message: {msg:?}");
 
     // Evaluate the message (prefill)
     ctx.eval_message(model, context, msg, true)?;
@@ -269,7 +274,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
     // Setup model parameters
     let mut model_params = LlamaModelParams::default();
     if !params.no_gpu {
-        model_params = model_params.with_n_gpu_layers(1000000); // Use all layers on GPU
+        model_params = model_params.with_n_gpu_layers(1_000_000); // Use all layers on GPU
     }
 
     // Load model
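The changes are mechanical fixes for common clippy lints: replacing a glob import with explicit imports (clippy::wildcard_imports), inlining variables into format strings (clippy::uninlined_format_args), adding a "# Errors" doc section to public fallible functions (clippy::missing_errors_doc), grouping digits in long numeric literals (clippy::unreadable_literal), and adjusting how references are passed to init_from_file and eval_chunks. For illustration, here is a minimal, self-contained sketch of these lint categories; all names in it are made up for the example and are not from this repository:

// Sketch only: illustrative names, not part of this repository.
use std::fmt::Write; // explicit import rather than `use std::fmt::*;`
                     // (clippy::wildcard_imports)

/// Describes how many layers are offloaded to the GPU.
///
/// # Errors
///
/// Returns `std::fmt::Error` if writing to the output buffer fails
/// (`clippy::missing_errors_doc` asks for this section on public
/// functions returning `Result`).
pub fn describe(n_layers: u32) -> Result<String, std::fmt::Error> {
    let mut out = String::new();
    // `clippy::uninlined_format_args`: inline the variable into the
    // braces instead of passing it as a trailing argument.
    write!(out, "offloading {n_layers} layers")?;
    Ok(out)
}

fn main() -> Result<(), std::fmt::Error> {
    let n_layers = 1_000_000; // clippy::unreadable_literal: group digits
    let msg = describe(n_layers)?;
    println!("{msg}"); // not `println!("{}", msg)`
    Ok(())
}

Several of these lints sit in clippy's pedantic group, so they typically surface with `cargo clippy -- -W clippy::pedantic` rather than with the default warn set.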
