We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent d5ce3fe commit 698cb80 (Copy full SHA for 698cb80)
llama_cpp/llama_cpp.py
@@ -1367,7 +1367,7 @@ def llama_model_n_head(model: llama_model_p, /) -> int:
 # LLAMA_API int32_t llama_model_n_head_kv (const struct llama_model * model);
 @ctypes_function("llama_model_n_head_kv", [llama_model_p_ctypes], ctypes.c_int32)
 def llama_model_n_head_kv(model: llama_model_p, /) -> int:
-
+    ...


 # // Get the model's RoPE frequency scaling factor
 # LLAMA_API float llama_model_rope_freq_scale_train(const struct llama_model * model);
0 commit comments