@@ -819,9 +819,9 @@ std::string fs_get_cache_file(const std::string & filename) {
 //
 // Model utils
 //
-struct common_init_result common_init_from_common_params(common_params & params) {
+struct common_init_result common_init_from_params(common_params & params) {
     common_init_result iparams;
-    auto mparams = common_model_params_from_common_params(params);
+    auto mparams = common_model_params_to_llama(params);

     llama_model * model = nullptr;

@@ -863,7 +863,7 @@ struct common_init_result common_init_from_common_params(common_params & params)
         }
     }

-    auto cparams = common_context_params_from_common_params(params);
+    auto cparams = common_context_params_to_llama(params);

     llama_context * lctx = llama_new_context_with_model(model, cparams);
     if (lctx == NULL) {
@@ -970,7 +970,7 @@ void common_lora_adapters_apply(struct llama_context * ctx, std::vector<common_l
     }
 }

-struct llama_model_params common_model_params_from_common_params(const common_params & params) {
+struct llama_model_params common_model_params_to_llama(const common_params & params) {
     auto mparams = llama_model_default_params();

     if (params.n_gpu_layers != -1) {
@@ -1022,7 +1022,7 @@ static ggml_type kv_cache_type_from_str(const std::string & s) {
     throw std::runtime_error("Invalid cache type: " + s);
 }

-struct llama_context_params common_context_params_from_common_params(const common_params & params) {
+struct llama_context_params common_context_params_to_llama(const common_params & params) {
     auto cparams = llama_context_default_params();

     cparams.n_ctx = params.n_ctx;
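Taken together, the hunks rename three helpers in common.cpp: common_init_from_common_params becomes common_init_from_params, and the two parameter converters become common_model_params_to_llama and common_context_params_to_llama. Below is a minimal caller-side sketch of the renamed API; the common_init_result field names (model, context) and any common_params fields beyond those visible in the diff are assumptions, not part of this commit.

// Minimal usage sketch of the renamed helpers (hypothetical example, not part
// of the commit). Assumes "common.h" declares common_params/common_init_result
// as in this file; the field names below are assumptions.
#include "common.h"
#include "llama.h"

int main() {
    common_params params;
    params.model = "model.gguf";   // assumed field: path to a GGUF model
    params.n_ctx = 2048;           // forwarded by common_context_params_to_llama()

    // New entry point name after this commit (was common_init_from_common_params):
    common_init_result iparams = common_init_from_params(params);
    if (iparams.model == nullptr || iparams.context == nullptr) {
        return 1; // model load or context creation failed
    }

    // ... run inference with iparams.context ...

    llama_free(iparams.context);
    llama_free_model(iparams.model);
    return 0;
}

Since the diff shows the two converters as standalone functions, a caller that only needs llama_model_params or llama_context_params can also call common_model_params_to_llama(params) or common_context_params_to_llama(params) directly.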