We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent efce1f4 commit c392ec2 — Copy full SHA for c392ec2
llama_cpp/llama_cpp.py
@@ -1280,12 +1280,6 @@ def llama_n_ubatch(ctx: llama_context_p, /) -> int:
1280
def llama_n_seq_max(ctx: llama_context_p, /) -> int:
1281
...
1282
1283
-# LLAMA_API int32_t llama_model_n_head_kv(const struct llama_model * model);
1284
-@ctypes_function("llama_model_n_head_kv", [llama_model_p_ctypes], ctypes.c_uint32)
1285
-def llama_model_n_head_kv(model: llama_model_p, /) -> int:
1286
- ...
1287
-
1288
1289
# DEPRECATED(LLAMA_API int32_t llama_n_ctx_train(const struct llama_model * model), "use llama_model_n_ctx_train instead");
1290
@ctypes_function("llama_n_ctx_train", [llama_model_p_ctypes], ctypes.c_int32)
1291
def llama_n_ctx_train(model: llama_model_p, /) -> int:
0 commit comments