@@ -267,7 +267,7 @@ class Opt {
267267 " Commands:\n "
268268 " model\n "
269269 " Model is a string with an optional prefix of \n "
270- " huggingface:// (hf://), ollama://, https:// or file://.\n "
270+ " huggingface:// (hf://), modelscope:// (ms://), ollama://, https:// or file://.\n "
271271 " If no protocol is specified and a file exists in the specified\n "
272272 " path, file:// is assumed, otherwise if a file does not exist in\n "
273273 " the specified path, ollama:// is assumed. Models that are being\n "
@@ -282,6 +282,9 @@ class Opt {
282282 " llama-run hf://QuantFactory/SmolLM-135M-GGUF/SmolLM-135M.Q2_K.gguf\n "
283283 " llama-run "
284284 " huggingface://bartowski/SmolLM-1.7B-Instruct-v0.2-GGUF/SmolLM-1.7B-Instruct-v0.2-IQ3_M.gguf\n "
285+ " llama-run ms://QuantFactory/SmolLM-135M-GGUF/SmolLM-135M.Q2_K.gguf\n "
286+ " llama-run "
287+ " modelscope://bartowski/SmolLM-1.7B-Instruct-v0.2-GGUF/SmolLM-1.7B-Instruct-v0.2-IQ3_M.gguf\n "
285288 " llama-run https://example.com/some-file1.gguf\n "
286289 " llama-run some-file2.gguf\n "
287290 " llama-run file://some-file3.gguf\n "
@@ -689,16 +692,14 @@ class LlamaData {
689692 return 0 ;
690693 }
691694
692- int huggingface_dl ( std::string & model, const std::string & bn) {
695+ int dl_from_endpoint (std::string & model_endpoint, std::string & model, const std::string & bn) {
693696 // Find the second occurrence of '/' after protocol string
694697 size_t pos = model.find ('/');
695698 pos = model.find ('/', pos + 1 );
696699 std::string hfr, hff;
697700 std::vector<std::string> headers = { " User-Agent: llama-cpp" , " Accept: application/json" };
698701 std::string url;
699702
700- std::string model_endpoint = get_model_endpoint ();
701-
702703 if (pos == std::string::npos) {
703704 auto [model_name, manifest_url] = extract_model_and_tag (model, model_endpoint + " v2/" );
704705 hfr = model_name;
@@ -720,6 +721,16 @@ class LlamaData {
720721 return download (url, bn, true , headers);
721722 }
722723
724+ int modelscope_dl (std::string & model, const std::string & bn) {
725+ std::string model_endpoint = " https://modelscope.cn/models/" ;
726+ return dl_from_endpoint (model_endpoint, model, bn);
727+ }
728+
729+ int huggingface_dl (std::string & model, const std::string & bn) {
730+ std::string model_endpoint = get_model_endpoint ();
731+ return dl_from_endpoint (model_endpoint, model, bn);
732+ }
733+
723734 int ollama_dl (std::string & model, const std::string & bn) {
724735 const std::vector<std::string> headers = { " Accept: application/vnd.docker.distribution.manifest.v2+json" };
724735 if (model.find ('/') == std::string::npos) {
@@ -837,6 +848,9 @@ class LlamaData {
837848 rm_until_substring (model_, " hf.co/" );
838849 rm_until_substring (model_, " ://" );
839850 ret = huggingface_dl (model_, bn);
851+ } else if (string_starts_with (model_, " ms://" ) || string_starts_with (model_, " modelscope://" )) {
852+ rm_until_substring (model_, " ://" );
853+ ret = modelscope_dl (model_, bn);
840854 } else if ((string_starts_with (model_, " https://" ) || string_starts_with (model_, " http://" )) &&
841855 !string_starts_with (model_, " https://ollama.com/library/" )) {
842856 ret = download (model_, bn, true );