@@ -829,9 +829,9 @@ struct common_init_result common_init_from_params(common_params & params) {
     llama_model * model = nullptr;

     if (!params.hf_repo.empty() && !params.hf_file.empty()) {
-        model = common_load_model_from_hf(params.hf_repo.c_str(), params.hf_file.c_str(), params.model.c_str(), params.hf_token.c_str(), mparams);
+        model = common_load_model_from_hf(params.hf_repo, params.hf_file, params.model, params.hf_token, mparams);
     } else if (!params.model_url.empty()) {
-        model = common_load_model_from_url(params.model_url.c_str(), params.model.c_str(), params.hf_token.c_str(), mparams);
+        model = common_load_model_from_url(params.model_url, params.model, params.hf_token, mparams);
     } else {
         model = llama_load_model_from_file(params.model.c_str(), mparams);
     }
@@ -1342,17 +1342,17 @@ static bool common_download_file(const std::string & url, const std::string & pa
 }

 struct llama_model * common_load_model_from_url(
-        const char * model_url,
-        const char * path_model,
-        const char * hf_token,
+        const std::string & model_url,
+        const std::string & local_path,
+        const std::string & hf_token,
         const struct llama_model_params & params) {
     // Basic validation of the model_url
-    if (!model_url || strlen(model_url) == 0) {
+    if (model_url.empty()) {
         LOG_ERR("%s: invalid model_url\n", __func__);
         return NULL;
     }

-    if (!common_download_file(model_url, path_model, hf_token)) {
+    if (!common_download_file(model_url, local_path, hf_token)) {
         return NULL;
     }
@@ -1363,9 +1363,9 @@ struct llama_model * common_load_model_from_url(
             /*.no_alloc = */ true,
             /*.ctx      = */ NULL,
         };
-        auto * ctx_gguf = gguf_init_from_file(path_model, gguf_params);
+        auto * ctx_gguf = gguf_init_from_file(local_path.c_str(), gguf_params);
         if (!ctx_gguf) {
-            LOG_ERR("\n%s: failed to load input GGUF from %s\n", __func__, path_model);
+            LOG_ERR("\n%s: failed to load input GGUF from %s\n", __func__, local_path.c_str());
             return NULL;
         }

@@ -1384,13 +1384,13 @@ struct llama_model * common_load_model_from_url(
         // Verify the first split file format
         // and extract split URL and PATH prefixes
         {
-            if (!llama_split_prefix(split_prefix, sizeof(split_prefix), path_model, 0, n_split)) {
-                LOG_ERR("\n%s: unexpected model file name: %s n_split=%d\n", __func__, path_model, n_split);
+            if (!llama_split_prefix(split_prefix, sizeof(split_prefix), local_path.c_str(), 0, n_split)) {
+                LOG_ERR("\n%s: unexpected model file name: %s n_split=%d\n", __func__, local_path.c_str(), n_split);
                 return NULL;
             }

-            if (!llama_split_prefix(split_url_prefix, sizeof(split_url_prefix), model_url, 0, n_split)) {
-                LOG_ERR("\n%s: unexpected model url: %s n_split=%d\n", __func__, model_url, n_split);
+            if (!llama_split_prefix(split_url_prefix, sizeof(split_url_prefix), model_url.c_str(), 0, n_split)) {
+                LOG_ERR("\n%s: unexpected model url: %s n_split=%d\n", __func__, model_url.c_str(), n_split);
                 return NULL;
             }
         }
@@ -1417,14 +1417,14 @@ struct llama_model * common_load_model_from_url(
         }
     }

-    return llama_load_model_from_file(path_model, params);
+    return llama_load_model_from_file(local_path.c_str(), params);
 }

 struct llama_model * common_load_model_from_hf(
-        const char * repo,
-        const char * model,
-        const char * path_model,
-        const char * hf_token,
+        const std::string & repo,
+        const std::string & remote_path,
+        const std::string & local_path,
+        const std::string & hf_token,
         const struct llama_model_params & params) {
     // construct hugging face model url:
     //
@@ -1438,27 +1438,27 @@ struct llama_model * common_load_model_from_hf(
     std::string model_url = "https://huggingface.co/";
     model_url += repo;
     model_url += "/resolve/main/";
-    model_url += model;
+    model_url += remote_path;

-    return common_load_model_from_url(model_url.c_str(), path_model, hf_token, params);
+    return common_load_model_from_url(model_url, local_path, hf_token, params);
 }

 #else

 struct llama_model * common_load_model_from_url(
-        const char * /*model_url*/,
-        const char * /*path_model*/,
-        const char * /*hf_token*/,
+        const std::string & /*model_url*/,
+        const std::string & /*local_path*/,
+        const std::string & /*hf_token*/,
         const struct llama_model_params & /*params*/) {
     LOG_WRN("%s: llama.cpp built without libcurl, downloading from an url not supported.\n", __func__);
     return nullptr;
 }

 struct llama_model * common_load_model_from_hf(
-        const char * /*repo*/,
-        const char * /*model*/,
-        const char * /*path_model*/,
-        const char * /*hf_token*/,
+        const std::string & /*repo*/,
+        const std::string & /*remote_path*/,
+        const std::string & /*local_path*/,
+        const std::string & /*hf_token*/,
         const struct llama_model_params & /*params*/) {
     LOG_WRN("%s: llama.cpp built without libcurl, downloading from Hugging Face not supported.\n", __func__);
     return nullptr;