
Commit 1db1d5b

llama : expose layer device type via C API
1 parent abf2410

2 files changed: 6 additions, 0 deletions

include/llama.h

Lines changed: 1 addition & 0 deletions
@@ -511,6 +511,7 @@ extern "C" {
    LLAMA_API int32_t llama_model_n_ctx_train(const struct llama_model * model);
    LLAMA_API int32_t llama_model_n_embd     (const struct llama_model * model);
    LLAMA_API int32_t llama_model_n_layer    (const struct llama_model * model);
+   LLAMA_API int32_t llama_model_dev_layer  (const struct llama_model * model, int32_t il);
    LLAMA_API int32_t llama_model_n_head     (const struct llama_model * model);
    LLAMA_API int32_t llama_model_n_head_kv  (const struct llama_model * model);
    LLAMA_API int32_t llama_model_n_swa      (const struct llama_model * model);

src/llama-model.cpp

Lines changed: 5 additions & 0 deletions
@@ -14182,6 +14182,11 @@ int32_t llama_model_n_layer(const llama_model * model) {
    return model->hparams.n_layer;
}

+int32_t llama_model_dev_layer(const llama_model * model, int32_t il) {
+    ggml_backend_dev_t dev = model->dev_layer(il);
+    return static_cast<int32_t>(ggml_backend_dev_type(dev));
+}
+
int32_t llama_model_n_head(const llama_model * model) {
    return model->hparams.n_head();
}
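
The new accessor returns the layer's device type as a plain int32_t, which corresponds to the ggml_backend_dev_type enum from ggml-backend.h. Below is a minimal caller sketch, assuming the current llama.cpp loading API (llama_backend_init, llama_model_default_params, llama_model_load_from_file, llama_model_free) and the usual enum values (0 = CPU, 1 = GPU, 2 = ACCEL); everything outside this commit's diff is illustrative, not part of the change:

#include <stdio.h>
#include "llama.h"

int main(int argc, char ** argv) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s model.gguf\n", argv[0]);
        return 1;
    }

    llama_backend_init();

    // load the model; the per-layer CPU/GPU placement is decided during loading,
    // e.g. via mparams.n_gpu_layers
    struct llama_model_params mparams = llama_model_default_params();
    struct llama_model * model = llama_model_load_from_file(argv[1], mparams);
    if (model == NULL) {
        llama_backend_free();
        return 1;
    }

    // print which device type each layer ended up on; the returned value
    // mirrors ggml_backend_dev_type (assumed: 0 = CPU, 1 = GPU, 2 = ACCEL)
    const int32_t n_layer = llama_model_n_layer(model);
    for (int32_t il = 0; il < n_layer; il++) {
        printf("layer %3d -> device type %d\n", il, llama_model_dev_layer(model, il));
    }

    llama_model_free(model);
    llama_backend_free();
    return 0;
}

Since the function returns a plain int32_t rather than the enum type, a caller that wants symbolic names can compare the value against (or cast it to) enum ggml_backend_dev_type from ggml-backend.h.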
