Skip to content

Commit 3b5f7c2

Browse files
fix two more public APIs to use int32_t for n_threads
1 parent c49d634 commit 3b5f7c2

File tree

2 files changed: +4 −4 lines changed

include/llama.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -847,10 +847,10 @@ extern "C" {
     LLAMA_API void llama_set_n_threads(struct llama_context * ctx, int32_t n_threads, int32_t n_threads_batch);

     // Get the number of threads used for generation of a single token.
-    LLAMA_API int     llama_n_threads(struct llama_context * ctx);
+    LLAMA_API int32_t llama_n_threads(struct llama_context * ctx);

     // Get the number of threads used for prompt and batch processing (multiple token).
-    LLAMA_API int     llama_n_threads_batch(struct llama_context * ctx);
+    LLAMA_API int32_t llama_n_threads_batch(struct llama_context * ctx);

     // Set whether the model is in embeddings mode or not
     // If true, embeddings will be returned but logits will not

src/llama.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -19394,11 +19394,11 @@ void llama_set_n_threads(struct llama_context * ctx, int32_t n_threads, int32_t n_threads_batch)
     ctx->cparams.n_threads_batch = n_threads_batch;
 }

19397-
int llama_n_threads(struct llama_context * ctx) {
19397+
int32_t llama_n_threads(struct llama_context * ctx) {
1939819398
return ctx->cparams.n_threads;
1939919399
}
1940019400

19401-
int llama_n_threads_batch(struct llama_context * ctx) {
19401+
int32_t llama_n_threads_batch(struct llama_context * ctx) {
1940219402
return ctx->cparams.n_threads_batch;
1940319403
}
1940419404

Comments (0)