Commit 322338b

Merge branch 'ggml-org:master' into mradermacher
2 parents: d3c9aac + 17a1f0d

File tree: 128 files changed (+3443 additions, -5289 deletions)


.github/ISSUE_TEMPLATE/010-bug-compilation.yml

Lines changed: 1 addition & 1 deletion
@@ -40,7 +40,7 @@ body:
     attributes:
       label: GGML backends
       description: Which GGML backends do you know to be affected?
-      options: [AMX, BLAS, CPU, CUDA, HIP, Kompute, Metal, Musa, RPC, SYCL, Vulkan, OpenCL]
+      options: [AMX, BLAS, CPU, CUDA, HIP, Metal, Musa, RPC, SYCL, Vulkan, OpenCL]
       multiple: true
     validations:
       required: true

.github/ISSUE_TEMPLATE/011-bug-results.yml

Lines changed: 1 addition & 1 deletion
@@ -42,7 +42,7 @@ body:
     attributes:
       label: GGML backends
       description: Which GGML backends do you know to be affected?
-      options: [AMX, BLAS, CPU, CUDA, HIP, Kompute, Metal, Musa, RPC, SYCL, Vulkan, OpenCL]
+      options: [AMX, BLAS, CPU, CUDA, HIP, Metal, Musa, RPC, SYCL, Vulkan, OpenCL]
       multiple: true
     validations:
       required: true

.github/labeler.yml

Lines changed: 0 additions & 6 deletions
@@ -1,10 +1,4 @@
 # https://github.com/actions/labeler
-Kompute:
-  - changed-files:
-    - any-glob-to-any-file:
-      - ggml/include/ggml-kompute.h
-      - ggml/src/ggml-kompute/**
-      - README-kompute.md
 Apple Metal:
   - changed-files:
     - any-glob-to-any-file:

.github/workflows/build.yml

Lines changed: 2 additions & 11 deletions
@@ -342,7 +342,7 @@ jobs:
           cd build
           export GGML_VK_VISIBLE_DEVICES=0
           # This is using llvmpipe and runs slower than other backends
-          ctest -L main --verbose --timeout 3600
+          ctest -L main --verbose --timeout 4200
 
   ubuntu-22-cmake-hip:
     runs-on: ubuntu-22.04
@@ -740,9 +740,6 @@ jobs:
           - build: 'llvm-arm64-opencl-adreno'
             arch: 'arm64'
             defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DCMAKE_PREFIX_PATH="$env:RUNNER_TEMP/opencl-arm64-release" -DGGML_OPENCL=ON -DGGML_OPENCL_USE_ADRENO_KERNELS=ON'
-          # - build: 'kompute-x64'
-          #   arch: 'x64'
-          #   defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/x64-windows-llvm.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON -DGGML_OPENMP=OFF -DGGML_KOMPUTE=ON -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON'
 
     steps:
       - name: Clone
@@ -756,12 +753,6 @@ jobs:
           variant: ccache
          evict-old-files: 1d
 
-      - name: Clone Kompute submodule
-        id: clone_kompute
-        if: ${{ matrix.build == 'kompute-x64' }}
-        run: |
-          git submodule update --init ggml/src/ggml-kompute/kompute
-
       - name: Download OpenBLAS
         id: get_openblas
         if: ${{ matrix.build == 'openblas-x64' }}
@@ -777,7 +768,7 @@ jobs:
 
       - name: Install Vulkan SDK
         id: get_vulkan
-        if: ${{ matrix.build == 'kompute-x64' || matrix.build == 'vulkan-x64' }}
+        if: ${{ matrix.build == 'vulkan-x64' }}
         run: |
           curl.exe -o $env:RUNNER_TEMP/VulkanSDK-Installer.exe -L "https://sdk.lunarg.com/sdk/download/${env:VULKAN_VERSION}/windows/vulkansdk-windows-X64-${env:VULKAN_VERSION}.exe"
           & "$env:RUNNER_TEMP\VulkanSDK-Installer.exe" --accept-licenses --default-answer --confirm-command install

.gitmodules

Lines changed: 0 additions & 3 deletions
@@ -1,3 +0,0 @@
-[submodule "kompute"]
-	path = ggml/src/ggml-kompute/kompute
-	url = https://github.com/nomic-ai/kompute.git

CMakeLists.txt

Lines changed: 0 additions & 1 deletion
@@ -120,7 +120,6 @@ endfunction()
 
 llama_option_depr(FATAL_ERROR LLAMA_CUBLAS GGML_CUDA)
 llama_option_depr(WARNING LLAMA_CUDA GGML_CUDA)
-llama_option_depr(WARNING LLAMA_KOMPUTE GGML_KOMPUTE)
 llama_option_depr(WARNING LLAMA_METAL GGML_METAL)
 llama_option_depr(WARNING LLAMA_METAL_EMBED_LIBRARY GGML_METAL_EMBED_LIBRARY)
 llama_option_depr(WARNING LLAMA_NATIVE GGML_NATIVE)

common/arg.cpp

Lines changed: 7 additions & 0 deletions
@@ -2734,6 +2734,13 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.public_path = value;
         }
     ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_STATIC_PATH"));
+    add_opt(common_arg(
+        {"--api-prefix"}, "PREFIX",
+        string_format("prefix path the server serves from, without the trailing slash (default: %s)", params.api_prefix.c_str()),
+        [](common_params & params, const std::string & value) {
+            params.api_prefix = value;
+        }
+    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_API_PREFIX"));
     add_opt(common_arg(
         {"--no-webui"},
         string_format("Disable the Web UI (default: %s)", params.webui ? "enabled" : "disabled"),

common/common.h

Lines changed: 1 addition & 0 deletions
@@ -370,6 +370,7 @@ struct common_params {
 
     std::string hostname      = "127.0.0.1";
     std::string public_path   = ""; // NOLINT
+    std::string api_prefix    = ""; // NOLINT
     std::string chat_template = ""; // NOLINT
     bool use_jinja = false; // NOLINT
     bool enable_chat_template = true;
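
Note: the two hunks above only add the option and the field; the server-side routing change is not shown in this excerpt. A minimal client-side sketch of how the new prefix would be exercised, assuming llama-server is running locally on port 8080 and was started with --api-prefix /llama (or LLAMA_ARG_API_PREFIX=/llama) — the prefix value, host, and port are illustrative, not taken from the commit:

    # Hedged sketch: probe an existing llama-server endpoint (/health) under the
    # assumed API prefix. If the prefix is honored, <prefix>/health should answer
    # instead of the unprefixed /health.
    import json
    import urllib.request

    BASE = "http://127.0.0.1:8080/llama"  # host, port and prefix are assumptions

    with urllib.request.urlopen(f"{BASE}/health") as resp:
        print(resp.status, json.loads(resp.read() or b"{}"))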

convert_hf_to_gguf.py

Lines changed: 152 additions & 0 deletions
@@ -815,6 +815,9 @@ def get_vocab_base_pre(self, tokenizer) -> str:
         if chkhsh == "1431a23e583c97432bc230bff598d103ddb5a1f89960c8f1d1051aaa944d0b35":
             # ref: https://huggingface.co/sapienzanlp/Minerva-7B-base-v1.0
             res = "minerva-7b"
+        if chkhsh == "7e57df22b1fe23a7b1e1c7f3dc4e3f96d43a4eb0836d0c6bdc3436d7b2f1c664":
+            # ref: https://huggingface.co/tencent/Hunyuan-A13B-Instruct
+            res = "hunyuan"
 
         if res is None:
             logger.warning("\n")
@@ -6535,6 +6538,155 @@ def set_gguf_parameters(self):
         super().set_gguf_parameters()
         self.gguf_writer.add_audio_stack_factor(self.global_config["stack_factor"])
 
+
+@ModelBase.register("HunYuanMoEV1ForCausalLM")
+class HunYuanMoEModel(TextModel):
+    model_arch = gguf.MODEL_ARCH.HUNYUAN_MOE
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        # For handling tied embeddings
+        self._tok_embd = None
+
+    def set_vocab(self):
+        from transformers import AutoTokenizer
+        tokenizer = AutoTokenizer.from_pretrained(self.dir_model, trust_remote_code=True)
+
+        # 1. Get the pre-tokenizer identifier hash
+        tokpre = self.get_vocab_base_pre(tokenizer)
+
+        # 2. Reverse-engineer the merges list from mergeable_ranks
+        merges = []
+        vocab = {}
+        mergeable_ranks = tokenizer.mergeable_ranks
+        for token, rank in mergeable_ranks.items():
+            vocab[QwenModel.token_bytes_to_string(token)] = rank
+            if len(token) == 1:
+                continue
+            merged = QwenModel.bpe(mergeable_ranks, token, max_rank=rank)
+            if len(merged) == 2:  # todo this is an assert in Qwen, why?
+                merges.append(' '.join(map(QwenModel.token_bytes_to_string, merged)))
+
+        # 3. Generate the tokens and toktypes lists
+        vocab_size = self.hparams["vocab_size"]
+        assert tokenizer.vocab_size == vocab_size
+        special_tokens = tokenizer.special_tokens
+        reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in {**vocab, **special_tokens}.items()}
+        tokens: list[str] = []
+        toktypes: list[int] = []
+        for i in range(vocab_size):
+            if i not in reverse_vocab:
+                tokens.append(f"[PAD{i}]")
+                toktypes.append(gguf.TokenType.UNUSED)
+            else:
+                token = reverse_vocab[i]
+                tokens.append(token)
+                if i in special_tokens.values():
+                    toktypes.append(gguf.TokenType.CONTROL)
+                else:
+                    toktypes.append(gguf.TokenType.NORMAL)
+
+        # 4. Write all vocab-related fields to the GGUF writer
+        self.gguf_writer.add_tokenizer_model("gpt2")
+        self.gguf_writer.add_tokenizer_pre(tokpre)
+        self.gguf_writer.add_token_list(tokens)
+        self.gguf_writer.add_token_types(toktypes)
+        self.gguf_writer.add_token_merges(merges)
+
+        # 5. Add special tokens and chat templates
+        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False)
+        special_vocab.add_to_gguf(self.gguf_writer)
+        # FIX for BOS token: Overwrite incorrect id read from config.json
+        self.gguf_writer.add_bos_token_id(127959)  # <|bos|>
+
+    def set_gguf_parameters(self):
+        super().set_gguf_parameters()
+        hparams = self.hparams
+
+        self.gguf_writer.add_expert_count(hparams["num_experts"])
+        self.gguf_writer.add_expert_shared_feed_forward_length(hparams["intermediate_size"])
+
+        moe_intermediate_size = hparams["moe_intermediate_size"]
+        assert all(n == moe_intermediate_size[0] for n in moe_intermediate_size)
+        self.gguf_writer.add_expert_feed_forward_length(moe_intermediate_size[0])
+
+        moe_topk = hparams["moe_topk"]
+        assert all(topk == moe_topk[0] for topk in moe_topk)
+        self.gguf_writer.add_expert_used_count(moe_topk[0])
+
+        moe_shared_expert = hparams["num_shared_expert"]
+        assert all(n == moe_shared_expert[0] for n in moe_shared_expert)
+        self.gguf_writer.add_expert_shared_count(moe_shared_expert[0])
+
+        # Rope
+        rope_scaling = hparams.get("rope_scaling", {})
+        if rope_scaling.get("type") == "dynamic":
+            # HunYuan uses NTK Aware Alpha based scaling. Original implementation: https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/
+            # 1000 corresponds to a usable context length of 256k (https://github.com/Tencent-Hunyuan/Hunyuan-A13B/blob/main/report/Hunyuan_A13B_Technical_Report.pdf)
+            alpha = rope_scaling.get("alpha", 1000)
+            base = hparams.get("rope_theta", 10000.0)
+            dim = (hparams["hidden_size"] // hparams["num_attention_heads"])  # 128
+            scaled_base = base * (alpha ** (dim / (dim - 2)))  # 10000 * (1000 ** (128 / 126)) = 11158839.9251
+            self.gguf_writer.add_rope_freq_base(scaled_base)
+            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
+            self.gguf_writer.add_rope_scaling_factor(1)
+            # There is no consistent way to calculate ctx from alpha, and the config is incorrectly set to 32k
+            self.gguf_writer.add_rope_scaling_orig_ctx_len(256 * 1024)  # 256k context length
+            self.gguf_writer.add_context_length(256 * 1024)  # 256k context length
+
+            # if any of our assumptions about the values are wrong, something has changed and this may need to be updated
+            assert alpha == 1000 and base == 10000.0 and dim == 128 and self.hparams["max_position_embeddings"] in [32 * 1024, 256 * 1024], \
+                "HunYuan dynamic RoPE scaling assumptions changed, please update the logic or context length manually"
+
+    _experts: list[dict[str, Tensor]] | None = None
+
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        if name == "model.embed_tokens.weight":
+            self._tok_embd = data_torch.clone()
+
+        if name == "lm_head.weight":
+            if self.hparams.get("tie_word_embeddings", False):
+                logger.info("Skipping tied output layer 'lm_head.weight'")
+                return []
+
+        if name.find("mlp.experts") != -1:
+            n_experts = self.hparams["num_experts"]
+            assert bid is not None
+
+            if self._experts is None:
+                self._experts = [{} for _ in range(self.block_count)]
+
+            self._experts[bid][name] = data_torch
+
+            if len(self._experts[bid]) >= n_experts * 3:
+                # merge the experts into a single 3d tensor
+                tensors: list[tuple[str, Tensor]] = []
+                for w_name in ["down_proj", "gate_proj", "up_proj"]:
+                    datas: list[Tensor] = []
+
+                    for xid in range(n_experts):
+                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
+                        datas.append(self._experts[bid][ename])
+                        del self._experts[bid][ename]
+
+                    data_torch = torch.stack(datas, dim=0)
+                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
+                    new_name = self.map_tensor_name(merged_name)
+                    tensors.append((new_name, data_torch))
+
+                return tensors
+            else:
+                return []
+
+        return [(self.map_tensor_name(name), data_torch)]
+
+    def prepare_tensors(self):
+        super().prepare_tensors()
+        if self._experts is not None:
+            experts = [k for d in self._experts for k in d.keys()]
+            if len(experts) > 0:
+                raise ValueError(f"Unprocessed experts: {experts}")
+
 ###### CONVERSION LOGIC ######

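Two details of the added converter can be sanity-checked in isolation: the NTK-aware alpha RoPE scaling reduces to a single power-law expression, and the expert merge in modify_tensors() is a torch.stack over per-expert 2-D weights. The sketch below only reuses values quoted in the comments of the diff and toy tensor sizes; it is not part of the commit:

    # Arithmetic check of scaled_base = base * (alpha ** (dim / (dim - 2)))
    base  = 10000.0   # rope_theta
    alpha = 1000      # rope_scaling["alpha"] reported for Hunyuan-A13B
    dim   = 128       # hidden_size // num_attention_heads
    scaled_base = base * (alpha ** (dim / (dim - 2)))
    print(f"{scaled_base:.4f}")  # ~11158839.9251, matching the inline comment

    # Shape check for the expert merge: n_experts 2-D projection weights become
    # one 3-D tensor per projection (toy sizes, not the real model's).
    import torch
    n_experts, n_ff, n_embd = 4, 8, 16
    experts = [torch.randn(n_ff, n_embd) for _ in range(n_experts)]
    print(torch.stack(experts, dim=0).shape)  # torch.Size([4, 8, 16])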
convert_hf_to_gguf_update.py

Lines changed: 1 addition & 0 deletions
@@ -137,6 +137,7 @@ class TOKENIZER_TYPE(IntEnum):
     {"name": "chatglm-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/THUDM/glm-4-9b-chat", "chkhsh": "81d72c7348a9f0ebe86f23298d37debe0a5e71149e29bd283904c02262b27516"},
     {"name": "glm4", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/THUDM/glm-4-9b-hf", "chkhsh": "a1336059768a55c99a734006ffb02203cd450fed003e9a71886c88acf24fdbc2"},
     {"name": "minerva-7b", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/sapienzanlp/Minerva-7B-base-v1.0", "chkhsh": "1431a23e583c97432bc230bff598d103ddb5a1f89960c8f1d1051aaa944d0b35"},
+    {"name": "hunyuan", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/tencent/Hunyuan-A13B-Instruct", "chkhsh": "7e57df22b1fe23a7b1e1c7f3dc4e3f96d43a4eb0836d0c6bdc3436d7b2f1c664"},
 ]

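For reference, the chkhsh value registered here is, to the best of my understanding, derived by hashing the token ids that the tokenizer produces for a fixed check string in get_vocab_base_pre() of convert_hf_to_gguf.py. A minimal sketch of that mechanism, with a placeholder check string standing in for the real one (so it will not reproduce the hash above):

    # Hedged sketch of how a pre-tokenizer hash is computed; CHKTXT is a stand-in.
    from hashlib import sha256
    from transformers import AutoTokenizer

    CHKTXT = "placeholder check string"  # the real script uses a long, fixed chktxt

    tokenizer = AutoTokenizer.from_pretrained("tencent/Hunyuan-A13B-Instruct", trust_remote_code=True)
    chktok = tokenizer.encode(CHKTXT)
    chkhsh = sha256(str(chktok).encode()).hexdigest()
    print(chkhsh)  # with the real chktxt this should match the value registered above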