@@ -385,6 +385,7 @@ extern "C" {
385385 } llama_chat_message;
386386
387387 // lora adapter
388+ // TODO: rename to llama_adapter_lora
388389 struct llama_lora_adapter ;
389390
390391 // Helpers for getting default parameters
@@ -501,31 +502,40 @@ extern "C" {
501502 const char * fname_out,
502503 const llama_model_quantize_params * params);
503504
505+ //
506+ // Adapters
507+ //
508+
504509 // Load a LoRA adapter from file
505510 // The loaded adapter will be associated with the given model, and will be freed when the model is deleted
511+ // TODO: rename to llama_adapter_lora_init
506512 LLAMA_API struct llama_lora_adapter * llama_lora_adapter_init (
507513 struct llama_model * model,
508514 const char * path_lora);
509515
510516 // Add a loaded LoRA adapter to given context
511517 // This will not modify the model's weights
518+ // TODO: rename to llama_set_adapter_lora
512519 LLAMA_API int32_t llama_lora_adapter_set (
513520 struct llama_context * ctx,
514521 struct llama_lora_adapter * adapter,
515522 float scale);
516523
517524 // Remove a specific LoRA adapter from given context
518525 // Return -1 if the adapter is not present in the context
526+ // TODO: rename to llama_rm_adapter_lora
519527 LLAMA_API int32_t llama_lora_adapter_remove (
520528 struct llama_context * ctx,
521529 struct llama_lora_adapter * adapter);
522530
523531 // Remove all LoRA adapters from given context
532+ // TODO: rename to llama_clear_adapter_lora
524533 LLAMA_API void llama_lora_adapter_clear (
525534 struct llama_context * ctx);
526535
527536 // Manually free a LoRA adapter
528537 // Note: loaded adapters will be freed when the associated model is deleted
538+ // TODO: rename to llama_adapter_lora_free
529539 LLAMA_API void llama_lora_adapter_free (struct llama_lora_adapter * adapter);
530540
531541 // Apply a loaded control vector to a llama_context, or if data is NULL, clear
@@ -534,6 +544,7 @@ extern "C" {
534544 // to an n_embd x n_layers buffer starting from layer 1.
535545 // il_start and il_end are the layer range the vector should apply to (both inclusive)
536546 // See llama_control_vector_load in common to load a control vector.
547+ // TODO: rename to llama_adapter_vec_apply
537548 LLAMA_API int32_t llama_control_vector_apply (
538549 struct llama_context * lctx,
539550 const float * data,
0 commit comments