
Commit b910840

Merge branch 'main' into disable-llama-curl

2 parents: ddd380a + 808c6c5

2 files changed: 6 additions, 0 deletions

llama-cpp-2/src/lib.rs (4 additions, 0 deletions)

@@ -217,6 +217,8 @@ pub enum LlamaLoraAdapterRemoveError {
 /// get the time (in microseconds) according to llama.cpp
 /// ```
 /// # use llama_cpp_2::llama_time_us;
+/// # use llama_cpp_2::llama_backend::LlamaBackend;
+/// let backend = LlamaBackend::init().unwrap();
 /// let time = llama_time_us();
 /// assert!(time > 0);
 /// ```
@@ -311,6 +313,8 @@ pub enum ApplyChatTemplateError {
 ///
 /// ```
 /// # use std::time::Duration;
+/// # use llama_cpp_2::llama_backend::LlamaBackend;
+/// let backend = LlamaBackend::init().unwrap();
 /// use llama_cpp_2::ggml_time_us;
 ///
 /// let start = ggml_time_us();
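For reference, the first patched doc example in lib.rs amounts to the standalone program sketched below. It is reconstructed from the hunk above; the fn main wrapper and the final drop are additions made only so the snippet compiles on its own, everything else is taken from the diff.

    // Initialize the llama.cpp backend before asking it for the time,
    // then check that the returned timestamp is positive.
    use llama_cpp_2::llama_backend::LlamaBackend;
    use llama_cpp_2::llama_time_us;

    fn main() {
        let backend = LlamaBackend::init().unwrap();
        let time = llama_time_us();
        assert!(time > 0);
        drop(backend); // release the backend once the call is done
    }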

llama-cpp-2/src/sampling.rs (2 additions, 0 deletions)

@@ -117,6 +117,8 @@ impl LlamaSampler {
 ///     data_array::LlamaTokenDataArray
 /// };
 /// use llama_cpp_2::sampling::LlamaSampler;
+/// use llama_cpp_2::llama_backend::LlamaBackend;
+/// let backend = LlamaBackend::init().unwrap();
 ///
 /// let mut data_array = LlamaTokenDataArray::new(vec![
 ///     LlamaTokenData::new(LlamaToken(0), 0., 0.),
