
Commit 932e858

Merge pull request #54 from menloresearch/update-dev-from-master-2025-04-12-00-08
Sync master with upstream release b5121
2 parents: 0d761fc + c94085d

28 files changed: +1413 −581 lines

.github/workflows/docker.yml

Lines changed: 6 additions & 6 deletions
@@ -36,13 +36,13 @@ jobs:
       matrix:
         config:
           # Multi-stage build
-          - { tag: "cpu", dockerfile: ".devops/cpu.Dockerfile", platforms: "linux/amd64,linux/arm64", full: true, light: true, server: true, freediskspace: false}
-          - { tag: "cuda", dockerfile: ".devops/cuda.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, freediskspace: false}
-          - { tag: "musa", dockerfile: ".devops/musa.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, freediskspace: true}
-          - { tag: "intel", dockerfile: ".devops/intel.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, freediskspace: false}
-          - { tag: "vulkan", dockerfile: ".devops/vulkan.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, freediskspace: false}
+          - { tag: "cpu", dockerfile: ".devops/cpu.Dockerfile", platforms: "linux/amd64,linux/arm64", full: true, light: true, server: true, free_disk_space: false }
+          - { tag: "cuda", dockerfile: ".devops/cuda.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, free_disk_space: false }
+          - { tag: "musa", dockerfile: ".devops/musa.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, free_disk_space: true }
+          - { tag: "intel", dockerfile: ".devops/intel.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, free_disk_space: false }
+          - { tag: "vulkan", dockerfile: ".devops/vulkan.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, free_disk_space: false }
           # Note: the rocm images are failing due to a compiler error and are disabled until this is fixed to allow the workflow to complete
-          #- {tag: "rocm", dockerfile: ".devops/rocm.Dockerfile", platforms: "linux/amd64,linux/arm64", full: true, light: true, server: true, freediskspace: true }
+          #- {tag: "rocm", dockerfile: ".devops/rocm.Dockerfile", platforms: "linux/amd64,linux/arm64", full: true, light: true, server: true, free_disk_space: true }
     steps:
       - name: Check out the repo
         uses: actions/checkout@v4

README.md

Lines changed: 4 additions & 1 deletion
@@ -97,6 +97,7 @@ Instructions for adding support for new models: [HOWTO-add-model.md](docs/develo
 - [x] [Flan T5](https://huggingface.co/models?search=flan-t5)
 - [x] [Open Elm models](https://huggingface.co/collections/apple/openelm-instruct-models-6619ad295d7ae9f868b759ca)
 - [x] [ChatGLM3-6b](https://huggingface.co/THUDM/chatglm3-6b) + [ChatGLM4-9b](https://huggingface.co/THUDM/glm-4-9b) + [GLMEdge-1.5b](https://huggingface.co/THUDM/glm-edge-1.5b-chat) + [GLMEdge-4b](https://huggingface.co/THUDM/glm-edge-4b-chat)
+- [x] [GLM-4-0414](https://huggingface.co/collections/THUDM/glm-4-0414-67f3cbcb34dd9d252707cb2e)
 - [x] [SmolLM](https://huggingface.co/collections/HuggingFaceTB/smollm-6695016cad7167254ce15966)
 - [x] [EXAONE-3.0-7.8B-Instruct](https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct)
 - [x] [FalconMamba Models](https://huggingface.co/collections/tiiuae/falconmamba-7b-66b9a580324dd1598b0f6d4a)
@@ -259,7 +260,9 @@ The [Hugging Face](https://huggingface.co) platform hosts a [number of LLMs](htt
 - [Trending](https://huggingface.co/models?library=gguf&sort=trending)
 - [LLaMA](https://huggingface.co/models?sort=trending&search=llama+gguf)

-You can either manually download the GGUF file or directly use any `llama.cpp`-compatible models from Hugging Face by using this CLI argument: `-hf <user>/<model>[:quant]`
+You can either manually download the GGUF file or directly use any `llama.cpp`-compatible models from [Hugging Face](https://huggingface.co/) or other model hosting sites, such as [ModelScope](https://modelscope.cn/), by using this CLI argument: `-hf <user>/<model>[:quant]`.
+
+By default, the CLI would download from Hugging Face, you can switch to other options with the environment variable `MODEL_ENDPOINT`. For example, you may opt to downloading model checkpoints from ModelScope or other model sharing communities by setting the environment variable, e.g. `MODEL_ENDPOINT=https://www.modelscope.cn/`.

 After downloading a model, use the CLI tools to run it locally - see below.
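
As a quick illustration of the endpoint switch described in the added README text, a minimal usage sketch (assumes a `llama-cli` binary built from this revision; `<user>/<model>` is a placeholder repo name):

# Default behaviour: download the GGUF from Hugging Face
llama-cli -hf <user>/<model>

# Switch the download endpoint to ModelScope via the new environment variable
MODEL_ENDPOINT=https://www.modelscope.cn/ llama-cli -hf <user>/<model>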

build-xcframework.sh

Lines changed: 16 additions & 4 deletions
@@ -41,6 +41,11 @@ COMMON_CMAKE_ARGS=(
     -DGGML_OPENMP=${GGML_OPENMP}
 )

+XCODE_VERSION=$(xcodebuild -version 2>/dev/null | head -n1 | awk '{ print $2 }')
+MAJOR_VERSION=$(echo $XCODE_VERSION | cut -d. -f1)
+MINOR_VERSION=$(echo $XCODE_VERSION | cut -d. -f2)
+echo "Detected Xcode version: $XCODE_VERSION"
+
 check_required_tool() {
     local tool=$1
     local install_message=$2
@@ -325,21 +330,28 @@ combine_static_libraries() {

     # Platform-specific post-processing for device builds
     if [[ "$is_simulator" == "false" ]]; then
-        if command -v vtool &>/dev/null; then
+        if command -v xcrun vtool &>/dev/null; then
             case "$platform" in
                 "ios")
                     echo "Marking binary as a framework binary for iOS..."
-                    vtool -set-build-version ios ${IOS_MIN_OS_VERSION} ${IOS_MIN_OS_VERSION} -replace \
+                    xcrun vtool -set-build-version ios ${IOS_MIN_OS_VERSION} ${IOS_MIN_OS_VERSION} -replace \
                         -output "${base_dir}/${output_lib}" "${base_dir}/${output_lib}"
                     ;;
                 "visionos")
                     echo "Marking binary as a framework binary for visionOS..."
-                    vtool -set-build-version xros ${VISIONOS_MIN_OS_VERSION} ${VISIONOS_MIN_OS_VERSION} -replace \
+                    if [[ "$MAJOR_VERSION" -gt 16 ]] || [[ "$MAJOR_VERSION" -eq 16 && "$MINOR_VERSION" -gt 2 ]]; then
+                        echo "Xcode version greater than 16.2, using visionOS."
+                        VISION_OS_BUILD_VERSION="visionos"
+                    else
+                        echo "Xcode version less than or equal to 16.2, using xros."
+                        VISION_OS_BUILD_VERSION="xros"
+                    fi
+                    xcrun vtool -set-build-version ${VISION_OS_BUILD_VERSION} ${VISIONOS_MIN_OS_VERSION} ${VISIONOS_MIN_OS_VERSION} -replace \
                         -output "${base_dir}/${output_lib}" "${base_dir}/${output_lib}"
                     ;;
                 "tvos")
                     echo "Marking binary as a framework binary for tvOS..."
-                    vtool -set-build-version tvos ${TVOS_MIN_OS_VERSION} ${TVOS_MIN_OS_VERSION} -replace \
+                    xcrun vtool -set-build-version tvos ${TVOS_MIN_OS_VERSION} ${TVOS_MIN_OS_VERSION} -replace \
                         -output "${base_dir}/${output_lib}" "${base_dir}/${output_lib}"
                     ;;
             esac
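
A small follow-up check one might run after the vtool step above, to confirm which platform the binary was stamped with; this is a sketch and assumes `xcrun vtool -show-build` is available in the installed Xcode toolchain:

# Print the build-version load command written by -set-build-version
xcrun vtool -show-build "${base_dir}/${output_lib}"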

common/arg.cpp

Lines changed: 8 additions & 9 deletions
@@ -228,12 +228,13 @@ static bool common_download_file_single(const std::string & url, const std::stri
     curl_easy_setopt(curl.get(), CURLOPT_URL, url.c_str());
     curl_easy_setopt(curl.get(), CURLOPT_FOLLOWLOCATION, 1L);

+    http_headers.ptr = curl_slist_append(http_headers.ptr, "User-Agent: llama-cpp");
     // Check if hf-token or bearer-token was specified
     if (!bearer_token.empty()) {
         std::string auth_header = "Authorization: Bearer " + bearer_token;
         http_headers.ptr = curl_slist_append(http_headers.ptr, auth_header.c_str());
-        curl_easy_setopt(curl.get(), CURLOPT_HTTPHEADER, http_headers.ptr);
     }
+    curl_easy_setopt(curl.get(), CURLOPT_HTTPHEADER, http_headers.ptr);

 #if defined(_WIN32)
     // CURLSSLOPT_NATIVE_CA tells libcurl to use standard certificate store of
@@ -544,7 +545,10 @@ static struct common_hf_file_res common_get_hf_file(const std::string & hf_repo_
     curl_ptr curl(curl_easy_init(), &curl_easy_cleanup);
     curl_slist_ptr http_headers;
     std::string res_str;
-    std::string url = "https://huggingface.co/v2/" + hf_repo + "/manifests/" + tag;
+
+    std::string model_endpoint = get_model_endpoint();
+
+    std::string url = model_endpoint + "v2/" + hf_repo + "/manifests/" + tag;
     curl_easy_setopt(curl.get(), CURLOPT_URL, url.c_str());
     curl_easy_setopt(curl.get(), CURLOPT_NOPROGRESS, 1L);
     typedef size_t(*CURLOPT_WRITEFUNCTION_PTR)(void * ptr, size_t size, size_t nmemb, void * data);
@@ -659,13 +663,8 @@ static void common_params_handle_model(
         }
     }

-    std::string hf_endpoint = "https://huggingface.co/";
-    const char * hf_endpoint_env = getenv("HF_ENDPOINT");
-    if (hf_endpoint_env) {
-        hf_endpoint = hf_endpoint_env;
-        if (hf_endpoint.back() != '/') hf_endpoint += '/';
-    }
-    model.url = hf_endpoint + model.hf_repo + "/resolve/main/" + model.hf_file;
+    std::string model_endpoint = get_model_endpoint();
+    model.url = model_endpoint + model.hf_repo + "/resolve/main/" + model.hf_file;
     // make sure model path is present (for caching purposes)
     if (model.path.empty()) {
         // this is to avoid different repo having same file name, or same file name in different subdirs
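
For reference, roughly the manifest request the updated code issues, expressed as an equivalent curl call (a sketch; `<user>/<model>` and `<tag>` are placeholders, and the default Hugging Face endpoint is shown — a bearer token would add an Authorization header as in the first hunk):

# -L mirrors CURLOPT_FOLLOWLOCATION; the User-Agent header is now always attached
curl -L -H "User-Agent: llama-cpp" \
    "https://huggingface.co/v2/<user>/<model>/manifests/<tag>"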

common/chat.cpp

Lines changed: 1 addition & 1 deletion
@@ -1622,7 +1622,7 @@ static common_chat_params common_chat_templates_apply_jinja(
     }

     // Hermes 2/3 Pro, Qwen 2.5 Instruct (w/ tools)
-    if (src.find("<tool_call>") != std::string::npos && params.json_schema.is_null()) {
+    if (src.find("<tool_call>") != std::string::npos && params.json_schema.is_null() && params.tools.is_array() && params.json_schema.is_null()) {
         return common_chat_params_init_hermes_2_pro(tmpl, params);
     }

common/common.cpp

Lines changed: 17 additions & 2 deletions
@@ -830,7 +830,7 @@ std::string fs_get_cache_directory() {
     if (getenv("LLAMA_CACHE")) {
         cache_directory = std::getenv("LLAMA_CACHE");
     } else {
-#ifdef __linux__
+#if defined(__linux__) || defined(__FreeBSD__)
         if (std::getenv("XDG_CACHE_HOME")) {
             cache_directory = std::getenv("XDG_CACHE_HOME");
         } else {
@@ -840,7 +840,9 @@ std::string fs_get_cache_directory() {
         cache_directory = std::getenv("HOME") + std::string("/Library/Caches/");
 #elif defined(_WIN32)
         cache_directory = std::getenv("LOCALAPPDATA");
-#endif // __linux__
+#else
+#  error Unknown architecture
+#endif
         cache_directory = ensure_trailing_slash(cache_directory);
         cache_directory += "llama.cpp";
     }
@@ -1027,6 +1029,19 @@ struct common_init_result common_init_from_params(common_params & params) {
     return iparams;
 }

+std::string get_model_endpoint() {
+    const char * model_endpoint_env = getenv("MODEL_ENDPOINT");
+    // We still respect the use of environment-variable "HF_ENDPOINT" for backward-compatibility.
+    const char * hf_endpoint_env = getenv("HF_ENDPOINT");
+    const char * endpoint_env = model_endpoint_env ? model_endpoint_env : hf_endpoint_env;
+    std::string model_endpoint = "https://huggingface.co/";
+    if (endpoint_env) {
+        model_endpoint = endpoint_env;
+        if (model_endpoint.back() != '/') model_endpoint += '/';
+    }
+    return model_endpoint;
+}
+
 void common_set_adapter_lora(struct llama_context * ctx, std::vector<common_adapter_lora_info> & lora) {
     llama_clear_adapter_lora(ctx);
     for (auto & la : lora) {
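
To illustrate the precedence implemented by `get_model_endpoint()` above, a hedged shell sketch (assumes a `llama-cli` build containing this change; `<user>/<model>` is a placeholder):

# MODEL_ENDPOINT wins over the legacy HF_ENDPOINT when both are set;
# a missing trailing slash is appended automatically.
HF_ENDPOINT=https://huggingface.co MODEL_ENDPOINT=https://www.modelscope.cn llama-cli -hf <user>/<model>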

common/common.h

Lines changed: 2 additions & 0 deletions
@@ -543,6 +543,8 @@ struct ggml_threadpool_params ggml_threadpool_params_from_cpu_params(const cpu_p
 // clear LoRA adapters from context, then apply new list of adapters
 void common_set_adapter_lora(struct llama_context * ctx, std::vector<common_adapter_lora_info> & lora);

+std::string get_model_endpoint();
+
 //
 // Batch utils
 //

convert_hf_to_gguf.py

Lines changed: 20 additions & 6 deletions
@@ -735,6 +735,9 @@ def get_vocab_base_pre(self, tokenizer) -> str:
         if chkhsh == "d353350c764d8c3b39c763113960e4fb4919bea5fbf208a0e3b22e8469dc7406":
             # ref: https://huggingface.co/meta-llama/Llama-4-Scout-17B-16E-Instruct
             res = "llama4"
+        if chkhsh == "a1336059768a55c99a734006ffb02203cd450fed003e9a71886c88acf24fdbc2":
+            # ref: https://huggingface.co/THUDM/glm-4-9b-hf
+            res = "glm4"

         if res is None:
             logger.warning("\n")
@@ -1750,7 +1753,7 @@ def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:

         low_freq_wavelen = old_context_len / low_freq_factor
         high_freq_wavelen = old_context_len / high_freq_factor
-        assert low_freq_wavelen != high_freq_wavelen
+        # assert low_freq_wavelen != high_freq_wavelen # Errors for Llama4

         rope_factors = []
         for freq in freqs:
@@ -1806,10 +1809,6 @@ def set_gguf_parameters(self):
         self.gguf_writer.add_expert_feed_forward_length(self.hparams["intermediate_size_moe"])

     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None):
-        name = name.replace("language_model.", "")
-        name = name.replace("feed_forward.", "mlp.") # a bit hacky for now
-        name = name.replace(".router.weight", ".gate.weight") # a bit hacky for now
-
         # split the gate_up into gate and up
         if "gate_up_proj" in name:
             name_up = name.replace("gate_up_proj", "up_proj.weight")
@@ -4901,6 +4900,22 @@ def prepare_tensors(self):
             self.gguf_writer.add_max_alibi_bias(self.max_alibi_bias)


+@Model.register("Glm4ForCausalLM")
+class Glm4Model(Model):
+    model_arch = gguf.MODEL_ARCH.GLM4
+
+    def set_vocab(self):
+        self._set_vocab_gpt2()
+
+    def set_gguf_parameters(self):
+        super().set_gguf_parameters()
+        if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
+            if self.hparams["rope_scaling"].get("type") == "yarn":
+                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
+                self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
+                self.gguf_writer.add_rope_scaling_orig_ctx_len(self.hparams["rope_scaling"]["original_max_position_embeddings"])
+
+
 @Model.register("GlmForCausalLM", "ChatGLMModel", "ChatGLMForConditionalGeneration")
 class ChatGLMModel(Model):
     model_arch = gguf.MODEL_ARCH.CHATGLM
@@ -5592,7 +5607,6 @@ def main() -> None:
     with torch.inference_mode():
         output_type = ftype_map[args.outtype]
         model_architecture = hparams["architectures"][0]
-
         try:
             model_class = Model.from_model_architecture(model_architecture)
         except NotImplementedError:
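
With the `Glm4Model` class registered above, a conversion sketch (assumes the script's usual positional model-directory argument and `--outtype` flag; the local checkout path and output type shown are illustrative placeholders):

# Convert a local GLM-4 checkout (e.g. a clone of THUDM/glm-4-9b-hf) to GGUF
python convert_hf_to_gguf.py /path/to/glm-4-9b-hf --outtype f16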

convert_hf_to_gguf_update.py

Lines changed: 1 addition & 0 deletions
@@ -114,6 +114,7 @@ class TOKENIZER_TYPE(IntEnum):
     {"name": "trillion", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/trillionlabs/Trillion-7B-preview", },
     {"name": "bailingmoe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/inclusionAI/Ling-lite", },
     {"name": "llama4", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/meta-llama/Llama-4-Scout-17B-16E-Instruct", },
+    {"name": "glm4", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/THUDM/glm-4-9b-hf", },
 ]

examples/llava/clip-impl.h

Lines changed: 16 additions & 7 deletions
@@ -1,5 +1,6 @@
 #include "ggml.h"
 #include "gguf.h"
+#include "clip.h"

 #include "clip.h"

@@ -202,23 +203,31 @@ static void clip_log_internal(enum ggml_log_level level, const char * format, ..
 // cpp wrappers
 //

+// wrapper for clip_image_size
+struct clip_image_size_deleter {
+    void operator()(clip_image_size * val) { clip_image_size_free(val); }
+};
+typedef std::unique_ptr<clip_image_size, clip_image_size_deleter> clip_image_size_ptr;
+
+// wrapper for clip_image_u8
 struct clip_image_u8_deleter {
     void operator()(clip_image_u8 * val) { clip_image_u8_free(val); }
 };
+typedef std::unique_ptr<clip_image_u8, clip_image_u8_deleter> clip_image_u8_ptr;

+// wrapper for clip_image_f32
 struct clip_image_f32_deleter {
     void operator()(clip_image_f32 * val) { clip_image_f32_free(val); }
 };
+typedef std::unique_ptr<clip_image_f32, clip_image_f32_deleter> clip_image_f32_ptr;

-struct clip_image_f32_batch_deleter {
-    void operator()(clip_image_f32_batch * val) { clip_image_f32_batch_free(val); }
+struct clip_image_u8_batch {
+    std::vector<clip_image_u8_ptr> entries;
 };

-typedef std::unique_ptr<clip_image_u8, clip_image_u8_deleter> clip_image_u8_ptr;
-typedef std::unique_ptr<clip_image_f32, clip_image_f32_deleter> clip_image_f32_ptr;
-typedef std::unique_ptr<clip_image_f32_batch, clip_image_f32_batch_deleter> clip_image_f32_batch_ptr;
-
-// TODO @ngxson : we're currently having a naming clash between struct clip_image_size and function clip_image_size()
+struct clip_image_f32_batch {
+    std::vector<clip_image_f32_ptr> entries;
+};

 //
 // common utils
0 commit comments
