Commit a8b6c7f

Merge pull request #362 from vladfaust/cb_eval
Add `cb_eval` to context params
2 parents 78de7a1 + cda0cf8 commit a8b6c7f

File tree: 1 file changed, +40 -0 lines changed

llama-cpp-2/src/context/params.rs

Lines changed: 40 additions & 0 deletions
@@ -339,6 +339,46 @@ impl LlamaContextParams {
         self.context_params.embeddings = embedding;
         self
     }
+
+    /// Set the evaluation callback.
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// extern "C" fn cb_eval_fn(
+    ///     t: *mut llama_cpp_sys_2::ggml_tensor,
+    ///     ask: bool,
+    ///     user_data: *mut std::ffi::c_void,
+    /// ) -> bool {
+    ///     false
+    /// }
+    ///
+    /// use llama_cpp_2::context::params::LlamaContextParams;
+    /// let params = LlamaContextParams::default();
+    /// params.with_cb_eval(Some(cb_eval_fn));
+    /// ```
+    pub fn with_cb_eval(
+        mut self,
+        cb_eval: llama_cpp_sys_2::ggml_backend_sched_eval_callback,
+    ) -> Self {
+        self.context_params.cb_eval = cb_eval;
+        self
+    }
+
+    /// Set the evaluation callback user data.
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// use llama_cpp_2::context::params::LlamaContextParams;
+    /// let params = LlamaContextParams::default();
+    /// let user_data = std::ptr::null_mut();
+    /// params.with_cb_eval_user_data(user_data);
+    /// ```
+    pub fn with_cb_eval_user_data(mut self, cb_eval_user_data: *mut std::ffi::c_void) -> Self {
+        self.context_params.cb_eval_user_data = cb_eval_user_data;
+        self
+    }
 }
 
 /// Default parameters for `LlamaContext`. (as defined in llama.cpp by `llama_context_default_params`)
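
Because both new builders take `self` by value and return `Self`, they chain onto the existing builder pattern. A minimal usage sketch combining them is shown below; the callback body and the `params_with_eval_callback` helper are illustrative only (not part of this commit), and the types are taken from the doc comments above.

use llama_cpp_2::context::params::LlamaContextParams;

// Illustrative no-op callback, mirroring the doc example above; it matches
// the signature expected by llama_cpp_sys_2::ggml_backend_sched_eval_callback.
extern "C" fn cb_eval_fn(
    _t: *mut llama_cpp_sys_2::ggml_tensor,
    _ask: bool,
    _user_data: *mut std::ffi::c_void,
) -> bool {
    false
}

// Hypothetical helper: chain both new builders on the default params.
// The callback type is an Option, hence Some(...); null user data here
// simply means the callback receives no extra state.
fn params_with_eval_callback() -> LlamaContextParams {
    LlamaContextParams::default()
        .with_cb_eval(Some(cb_eval_fn))
        .with_cb_eval_user_data(std::ptr::null_mut())
}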
