Skip to content

Commit aec18f1

Browse files
committed
clippy
1 parent c631133 commit aec18f1

File tree

4 files changed

+24
-20
lines changed

4 files changed

+24
-20
lines changed

llama-cpp-2/benches/grammar_bias.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -30,9 +30,9 @@ fn criterion_benchmark(c: &mut Criterion) {
3030
.unwrap();
3131
let backend = LlamaBackend::init().unwrap();
3232
let model_params = LlamaModelParams::default();
33-
let model = LlamaModel::load_from_file(&backend, &file, &model_params).unwrap();
33+
let model = LlamaModel::load_from_file(&backend, file, &model_params).unwrap();
3434
let mut ctx = model
35-
.new_context(&backend, &LlamaContextParams::default())
35+
.new_context(&backend, LlamaContextParams::default())
3636
.unwrap();
3737
let grammar = LlamaGrammar::from_str(include_str!("../src/grammar/json.gbnf")).unwrap();
3838

llama-cpp-2/examples/simple.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
//! This is a translation of simple.cpp in llama.cpp using llama-cpp-2.
2-
#![allow(clippy::cast_possible_wrap, clippy::cast_possible_truncation)]
2+
#![allow(clippy::cast_possible_wrap, clippy::cast_possible_truncation, clippy::cast_precision_loss, clippy::cast_sign_loss)]
33

44
use anyhow::{bail, Context, Result};
55
use clap::Parser;

llama-cpp-2/src/context/params.rs

Lines changed: 17 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -84,7 +84,7 @@ impl LlamaContextParams {
8484
/// let params = params.with_seed(1234);
8585
/// assert_eq!(params.seed(), 1234);
8686
/// ```
87-
pub fn with_seed(mut self, seed: u32) -> Self {
87+
#[must_use] pub fn with_seed(mut self, seed: u32) -> Self {
8888
self.context_params.seed = seed;
8989
self
9090
}
@@ -99,7 +99,7 @@ impl LlamaContextParams {
9999
/// .with_seed(1234);
100100
/// assert_eq!(params.seed(), 1234);
101101
/// ```
102-
pub fn seed(&self) -> u32 {
102+
#[must_use] pub fn seed(&self) -> u32 {
103103
self.context_params.seed
104104
}
105105

@@ -114,8 +114,8 @@ impl LlamaContextParams {
114114
/// let params = params.with_n_ctx(NonZeroU32::new(2048));
115115
/// assert_eq!(params.n_ctx(), NonZeroU32::new(2048));
116116
/// ```
117-
pub fn with_n_ctx(mut self, n_ctx: Option<NonZeroU32>) -> Self {
118-
self.context_params.n_ctx = n_ctx.map_or(0, |n_ctx| n_ctx.get());
117+
#[must_use] pub fn with_n_ctx(mut self, n_ctx: Option<NonZeroU32>) -> Self {
118+
self.context_params.n_ctx = n_ctx.map_or(0, std::num::NonZeroU32::get);
119119
self
120120
}
121121

@@ -128,11 +128,11 @@ impl LlamaContextParams {
128128
/// ```rust
129129
/// let params = llama_cpp_2::context::params::LlamaContextParams::default();
130130
/// assert_eq!(params.n_ctx(), std::num::NonZeroU32::new(512));
131-
pub fn n_ctx(&self) -> Option<NonZeroU32> {
131+
#[must_use] pub fn n_ctx(&self) -> Option<NonZeroU32> {
132132
NonZeroU32::new(self.context_params.n_ctx)
133133
}
134134

135-
/// Set the n_batch
135+
/// Set the `n_batch`
136136
///
137137
/// # Examples
138138
///
@@ -143,12 +143,12 @@ impl LlamaContextParams {
143143
/// .with_n_batch(2048);
144144
/// assert_eq!(params.n_batch(), 2048);
145145
/// ```
146-
pub fn with_n_batch(mut self, n_batch: u32) -> Self {
146+
#[must_use] pub fn with_n_batch(mut self, n_batch: u32) -> Self {
147147
self.context_params.n_batch = n_batch;
148148
self
149149
}
150150

151-
/// Get the n_batch
151+
/// Get the `n_batch`
152152
///
153153
/// # Examples
154154
///
@@ -157,7 +157,7 @@ impl LlamaContextParams {
157157
/// let params = LlamaContextParams::default();
158158
/// assert_eq!(params.n_batch(), 512);
159159
/// ```
160-
pub fn n_batch(&self) -> u32 {
160+
#[must_use] pub fn n_batch(&self) -> u32 {
161161
self.context_params.n_batch
162162
}
163163

@@ -171,7 +171,7 @@ impl LlamaContextParams {
171171
/// .with_rope_scaling_type(RopeScalingType::Linear);
172172
/// assert_eq!(params.rope_scaling_type(), RopeScalingType::Linear);
173173
/// ```
174-
pub fn with_rope_scaling_type(mut self, rope_scaling_type: RopeScalingType) -> Self {
174+
#[must_use] pub fn with_rope_scaling_type(mut self, rope_scaling_type: RopeScalingType) -> Self {
175175
self.context_params.rope_scaling_type = i32::from(rope_scaling_type);
176176
self
177177
}
@@ -184,7 +184,7 @@ impl LlamaContextParams {
184184
/// let params = llama_cpp_2::context::params::LlamaContextParams::default();
185185
/// assert_eq!(params.rope_scaling_type(), llama_cpp_2::context::params::RopeScalingType::Unspecified);
186186
/// ```
187-
pub fn rope_scaling_type(&self) -> RopeScalingType {
187+
#[must_use] pub fn rope_scaling_type(&self) -> RopeScalingType {
188188
RopeScalingType::from(self.context_params.rope_scaling_type)
189189
}
190190

@@ -198,7 +198,7 @@ impl LlamaContextParams {
198198
/// .with_rope_freq_base(0.5);
199199
/// assert_eq!(params.rope_freq_base(), 0.5);
200200
/// ```
201-
pub fn with_rope_freq_base(mut self, rope_freq_base: f32) -> Self {
201+
#[must_use] pub fn with_rope_freq_base(mut self, rope_freq_base: f32) -> Self {
202202
self.context_params.rope_freq_base = rope_freq_base;
203203
self
204204
}
@@ -211,7 +211,7 @@ impl LlamaContextParams {
211211
/// let params = llama_cpp_2::context::params::LlamaContextParams::default();
212212
/// assert_eq!(params.rope_freq_base(), 0.0);
213213
/// ```
214-
pub fn rope_freq_base(&self) -> f32 {
214+
#[must_use] pub fn rope_freq_base(&self) -> f32 {
215215
self.context_params.rope_freq_base
216216
}
217217

@@ -225,7 +225,7 @@ impl LlamaContextParams {
225225
/// .with_rope_freq_scale(0.5);
226226
/// assert_eq!(params.rope_freq_scale(), 0.5);
227227
/// ```
228-
pub fn with_rope_freq_scale(mut self, rope_freq_scale: f32) -> Self {
228+
#[must_use] pub fn with_rope_freq_scale(mut self, rope_freq_scale: f32) -> Self {
229229
self.context_params.rope_freq_scale = rope_freq_scale;
230230
self
231231
}
@@ -238,7 +238,7 @@ impl LlamaContextParams {
238238
/// let params = llama_cpp_2::context::params::LlamaContextParams::default();
239239
/// assert_eq!(params.rope_freq_scale(), 0.0);
240240
/// ```
241-
pub fn rope_freq_scale(&self) -> f32 {
241+
#[must_use] pub fn rope_freq_scale(&self) -> f32 {
242242
self.context_params.rope_freq_scale
243243
}
244244

@@ -250,7 +250,7 @@ impl LlamaContextParams {
250250
/// let params = llama_cpp_2::context::params::LlamaContextParams::default();
251251
/// assert_eq!(params.n_threads(), 4);
252252
/// ```
253-
pub fn n_threads(&self) -> u32 {
253+
#[must_use] pub fn n_threads(&self) -> u32 {
254254
self.context_params.n_threads
255255
}
256256

@@ -264,7 +264,7 @@ impl LlamaContextParams {
264264
/// .with_n_threads(8);
265265
/// assert_eq!(params.n_threads(), 8);
266266
/// ```
267-
pub fn with_n_threads(mut self, n_threads: u32) -> Self {
267+
#[must_use] pub fn with_n_threads(mut self, n_threads: u32) -> Self {
268268
self.context_params.n_threads = n_threads;
269269
self
270270
}

llama-cpp-2/src/llama_batch.rs

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,10 @@ impl LlamaBatch {
3737
///
3838
/// - [`self.llama_batch.n_tokens`] does not fit into a usize
3939
/// - [`seq_ids.len()`] does not fit into a [`llama_seq_id`]
40+
///
41+
/// # Errors
42+
///
43+
/// returns an error if there is insufficient space in the buffer
4044
pub fn add(
4145
&mut self,
4246
LlamaToken(id): LlamaToken,

0 commit comments

Comments
 (0)