Skip to content

Commit 071c2f9

Browse files
committed
Fix: changed the type of `n_threads` (and `n_threads_batch`) from `u32` to `i32`
1 parent 5c1468a commit 071c2f9

File tree

3 files changed

+7
-7
lines changed

3 files changed

+7
-7
lines changed

examples/simple/src/main.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -54,12 +54,12 @@ struct Args {
5454
long,
5555
help = "number of threads to use during generation (default: use all available threads)"
5656
)]
57-
threads: Option<u32>,
57+
threads: Option<i32>,
5858
#[arg(
5959
long,
6060
help = "number of threads to use during batch and prompt processing (default: use all available threads)"
6161
)]
62-
threads_batch: Option<u32>,
62+
threads_batch: Option<i32>,
6363
#[arg(
6464
short = 'c',
6565
long,

llama-cpp-2/src/context/params.rs

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -262,7 +262,7 @@ impl LlamaContextParams {
262262
/// assert_eq!(params.n_threads(), 4);
263263
/// ```
264264
#[must_use]
265-
pub fn n_threads(&self) -> u32 {
265+
pub fn n_threads(&self) -> i32 {
266266
self.context_params.n_threads
267267
}
268268

@@ -275,7 +275,7 @@ impl LlamaContextParams {
275275
/// assert_eq!(params.n_threads_batch(), 4);
276276
/// ```
277277
#[must_use]
278-
pub fn n_threads_batch(&self) -> u32 {
278+
pub fn n_threads_batch(&self) -> i32 {
279279
self.context_params.n_threads_batch
280280
}
281281

@@ -290,7 +290,7 @@ impl LlamaContextParams {
290290
/// assert_eq!(params.n_threads(), 8);
291291
/// ```
292292
#[must_use]
293-
pub fn with_n_threads(mut self, n_threads: u32) -> Self {
293+
pub fn with_n_threads(mut self, n_threads: i32) -> Self {
294294
self.context_params.n_threads = n_threads;
295295
self
296296
}
@@ -306,7 +306,7 @@ impl LlamaContextParams {
306306
/// assert_eq!(params.n_threads_batch(), 8);
307307
/// ```
308308
#[must_use]
309-
pub fn with_n_threads_batch(mut self, n_threads: u32) -> Self {
309+
pub fn with_n_threads_batch(mut self, n_threads: i32) -> Self {
310310
self.context_params.n_threads_batch = n_threads;
311311
self
312312
}

llama-cpp-sys-2/llama.cpp

0 commit comments

Comments (0)