Commit 12a8bfd

Merge branch 'upstream' into concedo_experimental
# Conflicts:
#	.github/workflows/build.yml
#	CODEOWNERS
#	README.md
#	docs/ops.md
#	docs/ops/SYCL.csv
#	docs/ops/Vulkan.csv
#	ggml/CMakeLists.txt
#	ggml/src/CMakeLists.txt
#	ggml/src/ggml-opencl/ggml-opencl.cpp
#	ggml/src/ggml-sycl/backend.hpp
#	ggml/src/ggml-sycl/element_wise.cpp
#	ggml/src/ggml-sycl/element_wise.hpp
#	ggml/src/ggml-sycl/ggml-sycl.cpp
#	tests/test-backend-ops.cpp
#	tests/test-thread-safety.cpp
2 parents be33288 + 9de9672 commit 12a8bfd

File tree

104 files changed: +16265 −610 lines changed


convert_hf_to_gguf.py

Lines changed: 99 additions & 2 deletions
@@ -895,8 +895,8 @@ def get_vocab_base_pre(self, tokenizer) -> str:
             # ref: https://huggingface.co/JetBrains/Mellum-4b-base
             res = "mellum"
         if chkhsh == "9b1be57e70d20d9501b2b3186e792d81181ae36ada3903c26f9fea418cf87206":
-            # ref: https://huggingface.co/inclusionAI/LLaDA-MoE-7B-A1B-Base
-            res = "llada-moe"
+            # ref: https://huggingface.co/inclusionAI/Ling-mini-base-2.0
+            res = "bailingmoe2"
         if chkhsh == "53e325976a6e142379c19b09afcae354f2f496f147afa8f9e189a33fe4e3024e":
             # ref: https://huggingface.co/ibm-granite/granite-docling-258M
             res = "granite-docling"
@@ -8060,6 +8060,103 @@ def prepare_tensors(self):
                 raise ValueError(f"Unprocessed experts: {experts}")


+@ModelBase.register("BailingMoeV2ForCausalLM")
+class BailingMoeV2Model(TextModel):
+    model_arch = gguf.MODEL_ARCH.BAILINGMOE2
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        if nextn_layers := self.hparams.get("num_nextn_predict_layers", 0):
+            self.block_count = self.hparams["num_hidden_layers"] + nextn_layers
+            self.tensor_map = gguf.get_tensor_name_map(self.model_arch, self.block_count)
+
+    def set_vocab(self):
+        self._set_vocab_gpt2()
+
+    def set_gguf_parameters(self):
+        super().set_gguf_parameters()
+        hparams = self.hparams
+        if (rope_dim := hparams.get("head_dim")) is None:
+            rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]
+
+        self.gguf_writer.add_rope_dimension_count(int(rope_dim * self.hparams.get("partial_rotary_factor", 0.5)))
+        rope_scaling = self.hparams.get("rope_scaling") or {}
+        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
+            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
+            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
+            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])
+        else:
+            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
+        self.gguf_writer.add_leading_dense_block_count(hparams["first_k_dense_replace"])
+        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
+        self.gguf_writer.add_expert_feed_forward_length(hparams["moe_intermediate_size"])
+        self.gguf_writer.add_expert_shared_feed_forward_length(hparams.get("moe_shared_expert_intermediate_size", hparams["moe_intermediate_size"] * hparams["num_shared_experts"]))
+        self.gguf_writer.add_expert_weights_scale(hparams["routed_scaling_factor"])
+        self.gguf_writer.add_expert_count(hparams["num_experts"])
+        self.gguf_writer.add_expert_shared_count(hparams["num_shared_experts"])
+        self.gguf_writer.add_expert_group_count(hparams["n_group"])
+        self.gguf_writer.add_expert_group_used_count(hparams["topk_group"])
+        self.gguf_writer.add_expert_weights_norm(hparams["norm_topk_prob"])
+
+        if hparams["score_function"] == "sigmoid":
+            self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SIGMOID)
+        elif hparams["score_function"] == "softmax":
+            self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SOFTMAX)
+        else:
+            raise ValueError(f"Unsupported score_function value: {hparams['score_function']}")
+
+        if (nextn_layers := self.hparams.get("num_nextn_predict_layers")) is not None:
+            self.gguf_writer.add_nextn_predict_layers(nextn_layers)
+
+    _experts: list[dict[str, Tensor]] | None = None
+
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        if "mlp.experts" in name:
+            n_experts = self.hparams["num_experts"]
+            assert bid is not None
+
+            tensors: list[tuple[str, Tensor]] = []
+
+            if self._experts is None:
+                self._experts = [{} for _ in range(self.block_count)]
+
+            self._experts[bid][name] = data_torch
+
+            if len(self._experts[bid]) >= n_experts * 3:
+                # merge the experts into a single 3d tensor
+                for w_name in ["down_proj", "gate_proj", "up_proj"]:
+                    datas: list[Tensor] = []
+
+                    for xid in range(n_experts):
+                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
+                        datas.append(self._experts[bid][ename])
+                        del self._experts[bid][ename]
+
+                    data_torch = torch.stack(datas, dim=0)
+
+                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
+
+                    new_name = self.map_tensor_name(merged_name)
+
+                    tensors.append((new_name, data_torch))
+
+            return tensors
+
+        if name.endswith(".expert_bias"):
+            name = name.replace(".expert_bias", ".expert_bias.bias")
+
+        return [(self.map_tensor_name(name), data_torch)]
+
+    def prepare_tensors(self):
+        super().prepare_tensors()
+
+        if self._experts is not None:
+            # flatten `list[dict[str, Tensor]]` into `list[str]`
+            experts = [k for d in self._experts for k in d.keys()]
+            if len(experts) > 0:
+                raise ValueError(f"Unprocessed experts: {experts}")
+
+
 @ModelBase.register("GroveMoeForCausalLM", "modeling_grove_moe.GroveMoeForCausalLM")
 class GroveMoeModel(TextModel):
     model_arch = gguf.MODEL_ARCH.GROVEMOE
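The modify_tensors method added above buffers per-expert weights until all num_experts * 3 projections (down, gate, up) for a layer have arrived, then stacks each projection family into a single 3D tensor, the fused layout used for MoE expert weights in GGUF. A self-contained sketch of that stacking step, with illustrative shapes:

import torch

n_experts, n_ff, n_embd = 4, 8, 16

# One 2D weight matrix per expert, as stored in the HF checkpoint.
per_expert = [torch.randn(n_ff, n_embd) for _ in range(n_experts)]

# torch.stack adds a new leading dimension: [n_experts, n_ff, n_embd].
merged = torch.stack(per_expert, dim=0)
assert merged.shape == (n_experts, n_ff, n_embd)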

convert_hf_to_gguf_update.py

Lines changed: 1 addition & 1 deletion
@@ -139,7 +139,7 @@ class TOKENIZER_TYPE(IntEnum):
     {"name": "lfm2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/LiquidAI/LFM2-Tokenizer"},
     {"name": "exaone4", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/LGAI-EXAONE/EXAONE-4.0-32B", },
     {"name": "mellum", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/JetBrains/Mellum-4b-base", },
-    {"name": "llada-moe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/inclusionAI/LLaDA-MoE-7B-A1B-Base", },
+    {"name": "bailingmoe2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/inclusionAI/Ling-mini-base-2.0", },
     {"name": "granite-docling", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/ibm-granite/granite-docling-258M", },
 ]

docs/backend/zDNN.md

Lines changed: 0 additions & 61 deletions
This file was deleted.

docs/build-riscv64-spacemit.md

Lines changed: 0 additions & 89 deletions
This file was deleted.

embd_res/klite.embd

Lines changed: 10 additions & 12 deletions
@@ -27427,7 +27427,8 @@ Current version indicated by LITEVER below.
 </div>
 <div id="oaicustom" class="menutext hidden">
 <span id="oaidesc">
-Entering your OpenAI API key will allow you to use KoboldAI Lite with their API.<br><br>
+Entering your <a class="color_blueurl" href="https://platform.openai.com/api-keys">OpenAI API key</a> will allow you to use KoboldAI Lite with their API.<br>
+<b>Or, set a custom API URL to use ANY 3rd-party OAI Compatible API</b><br><br>
 KoboldAI Lite takes no responsibility for your usage of this feature. Your API key is used directly with the OpenAI API, <span class="color_red">unless CORS proxy is used</span>. Reconnecting resets all custom OpenAI URLs.<br>Only Temperature, Top-P and Repetition Penalty samplers are used.<br><br>
 <span class="color_green" style="font-weight: bold;">Please input OpenAI API URL and Key.</span><br>
 <div style="display:inline-flex">
@@ -27438,17 +27439,17 @@ Current version indicated by LITEVER below.
 </div>
 </span>
 <span id="openrouterdesc" class="hidden">
-Entering your OpenRouter API key will allow you to use KoboldAI Lite with their API.<br><br>
+Entering your <a class="color_blueurl" href="https://openrouter.ai/settings/keys">OpenRouter API key</a> will allow you to use KoboldAI Lite with their API.<br><br>
 KoboldAI Lite takes no responsibility for your usage of this feature. Your API key is used directly with the OpenRouter API and is not transmitted to us.<br>Only Temperature, Top-P and Repetition Penalty samplers are used.<br><br>
 <span class="color_green" style="font-weight: bold;">Please input OpenRouter Key.</span><br><br>
 </span>
 <span id="mistralaidesc" class="hidden">
-Entering your MistralAI API key will allow you to use KoboldAI Lite with their API.<br><br>
+Entering your <a class="color_blueurl" href="https://docs.mistral.ai/getting-started/quickstart">MistralAI API key</a> will allow you to use KoboldAI Lite with their API.<br><br>
 KoboldAI Lite takes no responsibility for your usage of this feature. Your API key is used directly with the MistralAI API and is not transmitted to us.<br>Only Temperature and Top-P samplers are used.<br><br>
 <span class="color_green" style="font-weight: bold;">Please input MistralAI Key.</span><br><br>
 </span>
 <span id="featherlessdesc" class="hidden">
-Entering your Featherless API key will allow you to use KoboldAI Lite with their API.<br><br>
+Entering your <a class="color_blueurl" href="https://featherless.ai/docs/getting-started">Featherless API key</a> will allow you to use KoboldAI Lite with their API.<br><br>
 KoboldAI Lite takes no responsibility for your usage of this feature. Your API key is used directly with the Featherless API and is not transmitted to us.<br>Only Temperature, Top-P, Top-K, Min-P and Repetition Penalty samplers are used.<br><br>
 <span class="color_green" style="font-weight: bold;">Please input Featherless Key.</span><br><br>
 </span>
@@ -27463,8 +27464,8 @@ Current version indicated by LITEVER below.
 <span class="color_green" style="font-weight: bold;">No Key Required.</span><br><br>
 </span>
 <span id="nvidianimdesc" class="hidden">
-Entering your Nvidia NIM API key will allow you to use KoboldAI Lite with their API.<br><br>
-KoboldAI Lite takes no responsibility for your usage of this feature. Due to CORS restrictions, your connection WILL be proxied.<br>Only Temperature, Top-P and Repetition Penalty samplers are used.<br><br>
+Entering your <a class="color_blueurl" href="https://build.nvidia.com/explore/discover">Nvidia NIM API key</a> will allow you to use KoboldAI Lite with their API.<br><br>
+KoboldAI Lite takes no responsibility for your usage of this feature. <span class="color_red">Due to CORS restrictions, your connection WILL be proxied.</span><br>Only Temperature, Top-P and Repetition Penalty samplers are used.<br><br>
 <span class="color_green" style="font-weight: bold;">Please input Nvidia NIM Key.</span><br><br>
 </span>
@@ -27685,7 +27686,7 @@ Current version indicated by LITEVER below.

 </div>
 <div id="geminicustom" class="menutext hidden">
-Uses Gemini by Google.<br><br>
+Entering your <a class="color_blueurl" href="https://docs.mistral.ai/getting-started/quickstart">Google Gemini API key</a> will allow you to use KoboldAI Lite with their API.<br><br>
 KoboldAI Lite takes no responsibility for your usage of this feature. Your API key is used directly with the Gemini API and is not transmitted to us.<br><br>
 <div>
 <select title="Gemini AI Model Selection" style="padding:4px; width:calc(100% - 110px); display:inline-block" class="form-control" id="custom_gemini_model" onchange="togglegeminimodel()">
@@ -27739,13 +27740,10 @@ Current version indicated by LITEVER below.
 </div>
 </div>
 <div id="coherecustom" class="menutext hidden">
-Uses Cohere's models through their own API.<br><br>
+Entering your <a class="color_blueurl" href="https://dashboard.cohere.com/api-keys">Cohere API key</a> will allow you to use KoboldAI Lite with their API.<br><br>
 KoboldAI Lite takes no responsibility for your usage of this feature. Your API key is used directly with the Cohere API and is not transmitted to us.<br><br>
 <select title="Cohere AI Model Selection" style="padding:4px;" class="form-control" id="custom_cohere_model">
-<option value="command" selected="selected">command</option>
-<option value="command-r">command-r</option>
-<option value="command-r-plus">command-r-plus</option>
-<option value="command-r-08-2024">command-r-08-2024</option>
+<option value="command-r-08-2024" selected="selected">command-r-08-2024</option>
 <option value="command-r-plus-08-2024">command-r-plus-08-2024</option>
 <option value="command-r7b-12-2024">command-r7b-12-2024</option>
 <option value="command-a-03-2025">command-a-03-2025</option>
examples/model-conversion/scripts/utils/curl-embedding-server.sh

Lines changed: 0 additions & 6 deletions
This file was deleted.

ggml/include/ggml-hexagon.h

Lines changed: 19 additions & 0 deletions
@@ -0,0 +1,19 @@
+#pragma once
+
+#include "ggml.h"
+#include "ggml-backend.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// backend API
+GGML_BACKEND_API ggml_backend_t ggml_backend_hexagon_init(void);
+
+GGML_BACKEND_API bool ggml_backend_is_hexagon(ggml_backend_t backend);
+
+GGML_BACKEND_API ggml_backend_reg_t ggml_backend_hexagon_reg(void);
+
+#ifdef __cplusplus
+}
+#endif
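The three entry points above follow the usual ggml backend registration pattern (init, type check, registry handle). As a rough illustration only — assuming the Hexagon backend was compiled in, and that the shared library name below matches your build output — the C API could be exercised from Python via ctypes:

import ctypes

# Library name is an assumption; adjust to your build artifact.
lib = ctypes.CDLL("libggml.so")

lib.ggml_backend_hexagon_init.restype = ctypes.c_void_p
lib.ggml_backend_is_hexagon.argtypes = [ctypes.c_void_p]
lib.ggml_backend_is_hexagon.restype = ctypes.c_bool

# init returns NULL if no Hexagon device is available.
backend = lib.ggml_backend_hexagon_init()
assert backend and lib.ggml_backend_is_hexagon(backend)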
