Commit 24c1ea9

Merge branch 'master' into esocrok
2 parents b0e350e + 6ea37f5 commit 24c1ea9

48 files changed, +2240 -306 lines changed

convert_hf_to_gguf.py

Lines changed: 99 additions & 2 deletions
@@ -895,8 +895,8 @@ def get_vocab_base_pre(self, tokenizer) -> str:
             # ref: https://huggingface.co/JetBrains/Mellum-4b-base
             res = "mellum"
         if chkhsh == "9b1be57e70d20d9501b2b3186e792d81181ae36ada3903c26f9fea418cf87206":
-            # ref: https://huggingface.co/inclusionAI/LLaDA-MoE-7B-A1B-Base
-            res = "llada-moe"
+            # ref: https://huggingface.co/inclusionAI/Ling-mini-base-2.0
+            res = "bailingmoe2"
         if chkhsh == "53e325976a6e142379c19b09afcae354f2f496f147afa8f9e189a33fe4e3024e":
             # ref: https://huggingface.co/ibm-granite/granite-docling-258M
             res = "granite-docling"
@@ -8060,6 +8060,103 @@ def prepare_tensors(self):
                 raise ValueError(f"Unprocessed experts: {experts}")
 
 
+@ModelBase.register("BailingMoeV2ForCausalLM")
+class BailingMoeV2Model(TextModel):
+    model_arch = gguf.MODEL_ARCH.BAILINGMOE2
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        if nextn_layers := self.hparams.get("num_nextn_predict_layers", 0):
+            self.block_count = self.hparams["num_hidden_layers"] + nextn_layers
+            self.tensor_map = gguf.get_tensor_name_map(self.model_arch, self.block_count)
+
+    def set_vocab(self):
+        self._set_vocab_gpt2()
+
+    def set_gguf_parameters(self):
+        super().set_gguf_parameters()
+        hparams = self.hparams
+        if (rope_dim := hparams.get("head_dim")) is None:
+            rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]
+
+        self.gguf_writer.add_rope_dimension_count(int(rope_dim * self.hparams.get("partial_rotary_factor", 0.5)))
+        rope_scaling = self.hparams.get("rope_scaling") or {}
+        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
+            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
+            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
+            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])
+        else:
+            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
+        self.gguf_writer.add_leading_dense_block_count(hparams["first_k_dense_replace"])
+        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
+        self.gguf_writer.add_expert_feed_forward_length(hparams["moe_intermediate_size"])
+        self.gguf_writer.add_expert_shared_feed_forward_length(hparams.get("moe_shared_expert_intermediate_size", hparams["moe_intermediate_size"] * hparams["num_shared_experts"]))
+        self.gguf_writer.add_expert_weights_scale(hparams["routed_scaling_factor"])
+        self.gguf_writer.add_expert_count(hparams["num_experts"])
+        self.gguf_writer.add_expert_shared_count(hparams["num_shared_experts"])
+        self.gguf_writer.add_expert_group_count(hparams["n_group"])
+        self.gguf_writer.add_expert_group_used_count(hparams["topk_group"])
+        self.gguf_writer.add_expert_weights_norm(hparams["norm_topk_prob"])
+
+        if hparams["score_function"] == "sigmoid":
+            self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SIGMOID)
+        elif hparams["score_function"] == "softmax":
+            self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SOFTMAX)
+        else:
+            raise ValueError(f"Unsupported score_function value: {hparams['score_function']}")
+
+        if (nextn_layers := self.hparams.get("num_nextn_predict_layers")) is not None:
+            self.gguf_writer.add_nextn_predict_layers(nextn_layers)
+
+    _experts: list[dict[str, Tensor]] | None = None
+
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        if "mlp.experts" in name:
+            n_experts = self.hparams["num_experts"]
+            assert bid is not None
+
+            tensors: list[tuple[str, Tensor]] = []
+
+            if self._experts is None:
+                self._experts = [{} for _ in range(self.block_count)]
+
+            self._experts[bid][name] = data_torch
+
+            if len(self._experts[bid]) >= n_experts * 3:
+                # merge the experts into a single 3d tensor
+                for w_name in ["down_proj", "gate_proj", "up_proj"]:
+                    datas: list[Tensor] = []
+
+                    for xid in range(n_experts):
+                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
+                        datas.append(self._experts[bid][ename])
+                        del self._experts[bid][ename]
+
+                    data_torch = torch.stack(datas, dim=0)
+
+                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
+
+                    new_name = self.map_tensor_name(merged_name)
+
+                    tensors.append((new_name, data_torch))
+
+            return tensors
+
+        if name.endswith(".expert_bias"):
+            name = name.replace(".expert_bias", ".expert_bias.bias")
+
+        return [(self.map_tensor_name(name), data_torch)]
+
+    def prepare_tensors(self):
+        super().prepare_tensors()
+
+        if self._experts is not None:
+            # flatten `list[dict[str, Tensor]]` into `list[str]`
+            experts = [k for d in self._experts for k in d.keys()]
+            if len(experts) > 0:
+                raise ValueError(f"Unprocessed experts: {experts}")
+
+
 @ModelBase.register("GroveMoeForCausalLM", "modeling_grove_moe.GroveMoeForCausalLM")
 class GroveMoeModel(TextModel):
     model_arch = gguf.MODEL_ARCH.GROVEMOE
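
The heart of the new converter class is the expert merge in modify_tensors: each layer's routed experts arrive as separate 2D down/gate/up projection tensors, are buffered until all num_experts of them are present, and are then stacked into a single 3D tensor whose leading dimension indexes the expert. A minimal standalone sketch of that stacking step, using toy shapes rather than the real Ling-mini hyperparameters:

import torch

n_experts, n_ff, n_embd = 4, 8, 16   # toy sizes, not the actual hparams
bid, w_name = 0, "gate_proj"

# per-expert weights as the converter receives them, one 2D tensor each
experts = {
    f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight": torch.randn(n_ff, n_embd)
    for xid in range(n_experts)
}

# same merge as BailingMoeV2Model.modify_tensors: stack along a new expert axis
datas = [experts[f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"] for xid in range(n_experts)]
merged = torch.stack(datas, dim=0)
print(merged.shape)  # torch.Size([4, 8, 16]) -> (n_experts, n_ff, n_embd)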

convert_hf_to_gguf_update.py

Lines changed: 1 addition & 1 deletion
@@ -139,7 +139,7 @@ class TOKENIZER_TYPE(IntEnum):
     {"name": "lfm2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/LiquidAI/LFM2-Tokenizer"},
     {"name": "exaone4", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/LGAI-EXAONE/EXAONE-4.0-32B", },
     {"name": "mellum", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/JetBrains/Mellum-4b-base", },
-    {"name": "llada-moe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/inclusionAI/LLaDA-MoE-7B-A1B-Base", },
+    {"name": "bailingmoe2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/inclusionAI/Ling-mini-base-2.0", },
     {"name": "granite-docling", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/ibm-granite/granite-docling-258M", },
 ]
 

ggml/src/ggml-alloc.c

Lines changed: 22 additions & 0 deletions
@@ -598,6 +598,26 @@ static bool ggml_gallocr_is_allocated(ggml_gallocr_t galloc, struct ggml_tensor
     return t->data != NULL || ggml_gallocr_hash_get(galloc, t)->allocated;
 }
 
+// free the extra space at the end if the new tensor is smaller
+static void ggml_gallocr_free_extra_space(ggml_gallocr_t galloc, struct ggml_tensor * node, struct ggml_tensor * parent) {
+    struct hash_node * hn = ggml_gallocr_hash_get(galloc, node);
+    struct hash_node * p_hn = ggml_gallocr_hash_get(galloc, parent);
+
+    size_t parent_size = ggml_backend_buft_get_alloc_size(galloc->bufts[p_hn->buffer_id], parent);
+    size_t node_size = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], node);
+
+    GGML_ASSERT(parent_size >= node_size);
+
+    if (parent_size > node_size) {
+        struct ggml_dyn_tallocr * p_alloc = galloc->buf_tallocs[p_hn->buffer_id];
+        struct buffer_address p_addr = p_hn->addr;
+        p_addr.offset += node_size;
+        size_t extra_size = parent_size - node_size;
+        AT_PRINTF("freeing extra %zu bytes from parent %s for %s\n", extra_size, parent->name, node->name);
+        ggml_dyn_tallocr_free_tensor(p_alloc, p_addr, extra_size, parent);
+    }
+}
+
 static void ggml_gallocr_allocate_node(ggml_gallocr_t galloc, struct ggml_tensor * node, int buffer_id) {
     GGML_ASSERT(buffer_id >= 0);
     struct hash_node * hn = ggml_gallocr_hash_get(galloc, node);
@@ -643,13 +663,15 @@ static void ggml_gallocr_allocate_node(ggml_gallocr_t galloc, struct ggml_tensor
                     hn->addr = p_hn->addr;
                     p_hn->allocated = false; // avoid freeing the parent
                     view_src_hn->allocated = false;
+                    ggml_gallocr_free_extra_space(galloc, node, view_src);
                     return;
                 }
             } else {
                 AT_PRINTF("reusing parent %s for %s\n", parent->name, node->name);
                 hn->buffer_id = p_hn->buffer_id;
                 hn->addr = p_hn->addr;
                 p_hn->allocated = false; // avoid freeing the parent
+                ggml_gallocr_free_extra_space(galloc, node, parent);
                 return;
             }
         }
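
ggml_gallocr_free_extra_space covers the case where a node reuses its parent's allocation but needs fewer bytes: only the leading node_size bytes of the parent's slot remain claimed, and the tail is handed back to the dynamic allocator so a later tensor can land there. A toy Python model of the bookkeeping (this is not the ggml API, just the offset arithmetic):

def free_extra_space(free_list, offset, parent_size, node_size):
    # the reusing node keeps [offset, offset + node_size);
    # the tail [offset + node_size, offset + parent_size) becomes free again
    assert parent_size >= node_size
    if parent_size > node_size:
        free_list.append((offset + node_size, parent_size - node_size))  # (offset, size)
    return free_list

# example: the parent held 4096 bytes at offset 1024, the reusing node only needs 1536
print(free_extra_space([], 1024, 4096, 1536))  # [(2560, 2560)]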

ggml/src/ggml-sycl/pad_reflect_1d.cpp

Lines changed: 72 additions & 0 deletions
@@ -0,0 +1,72 @@
+#include "pad_reflect_1d.hpp"
+
+void pad_reflect_1d_f32(const float * src, float * dst,
+    const int64_t ne0, const int64_t ne02, const int p0, const int p1,
+    const int64_t nb0, const int64_t nb1, const int64_t nb2, const int64_t nb3,
+    const int64_t nb00, const int64_t nb01, const int64_t nb02, const int64_t nb03,
+    const sycl::nd_item<3> & item_ct1) {
+
+    const int i0 = item_ct1.get_group(0) * SYCL_CONCAT_BLOCK_SIZE + item_ct1.get_local_id(0);
+    const int i1 = item_ct1.get_group(1);
+    const int g2 = item_ct1.get_group(2);
+    const int i2 = g2 % ne02;
+    const int i3 = g2 / ne02;
+
+    if (i0 >= p0 + ne0 + p1) return;
+
+    int t = i0 - p0;
+    int period = 2 * ne0 - 2;
+    int m = t % period;
+    m += (m < 0) * period;
+    int center = ne0 - 1;
+    int srci0 = center - abs(center - m);
+
+    int offest_src = i3*nb3 + i2*nb2 + i1*nb1 + srci0*nb0;
+    int offest_dst = i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00;
+    dst[offest_dst] = src[offest_src];
+}
+
+void ggml_sycl_op_pad_reflect_1d(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+
+    const ggml_tensor * src0 = dst->src[0];
+    queue_ptr stream = ctx.stream();
+
+    GGML_ASSERT(src0->type == GGML_TYPE_F32);
+    GGML_ASSERT( dst->type == GGML_TYPE_F32);
+
+    const int32_t * opts = (const int32_t *) dst->op_params;
+    const int p0 = opts[0];
+    const int p1 = opts[1];
+
+    const int64_t ne0 = src0->ne[0];
+
+    const int64_t ne00 = dst->ne[0];
+    const int64_t ne01 = dst->ne[1];
+    const int64_t ne02 = dst->ne[2];
+    const int64_t ne03 = dst->ne[3];
+
+    const int64_t nb00 = dst->nb[0];
+    const int64_t nb01 = dst->nb[1];
+    const int64_t nb02 = dst->nb[2];
+    const int64_t nb03 = dst->nb[3];
+    const int64_t nb0 = src0->nb[0];
+    const int64_t nb1 = src0->nb[1];
+    const int64_t nb2 = src0->nb[2];
+    const int64_t nb3 = src0->nb[3];
+
+    int num_blocks = (ne00 + SYCL_CONCAT_BLOCK_SIZE - 1) / SYCL_CONCAT_BLOCK_SIZE;
+    sycl::range<3> global(num_blocks * SYCL_CONCAT_BLOCK_SIZE, ne01, ne02*ne03);
+    sycl::range<3> local(SYCL_CONCAT_BLOCK_SIZE, 1, 1);
+
+    stream->parallel_for(
+        sycl::nd_range<3>(global, local),
+        [=](sycl::nd_item<3> item_ct1) {
+            pad_reflect_1d_f32(
+                (const float *) src0->data, (float *) dst->data,
+                ne0, ne02, p0, p1,
+                nb0, nb1, nb2, nb3,
+                nb00, nb01, nb02, nb03,
+                item_ct1);
+        });
+}
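
The index math in pad_reflect_1d_f32 maps every column of the padded output row back onto a source column: the position relative to the unpadded row is wrapped modulo the reflection period 2*ne0 - 2 and then mirrored around the last element, which matches numpy-style "reflect" padding (edge values are not repeated). A small Python check of that formula; reflect_src_index is a hypothetical helper written only for this illustration:

import numpy as np

def reflect_src_index(i0: int, ne0: int, p0: int) -> int:
    t = i0 - p0                      # position relative to the unpadded row
    period = 2 * ne0 - 2             # reflection repeats with this period
    m = t % period                   # Python's % is already non-negative; the kernel adds (m < 0) * period for C
    center = ne0 - 1
    return center - abs(center - m)  # fold indices past the ends back inside

src = np.array([0.0, 1.0, 2.0, 3.0])
p0, p1 = 2, 3
ref = np.pad(src, (p0, p1), mode="reflect")
out = np.array([src[reflect_src_index(i, len(src), p0)] for i in range(len(ref))])
assert np.array_equal(out, ref)
print(out)  # [2. 1. 0. 1. 2. 3. 2. 1. 0.]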

ggml/src/ggml-sycl/pad_reflect_1d.hpp

Lines changed: 8 additions & 0 deletions
@@ -0,0 +1,8 @@
+#ifndef GGML_SYCL_PAD_REFLECT_1D_HPP
+#define GGML_SYCL_PAD_REFLECT_1D_HPP
+
+#include "common.hpp"
+
+void ggml_sycl_op_pad_reflect_1d(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+#endif // GGML_SYCL_PAD_REFLECT_1D_HPP

ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp

Lines changed: 1 addition & 1 deletion
@@ -345,7 +345,7 @@ void main() {
 
     float Lfrcp[Br];
     [[unroll]] for (uint32_t r = 0; r < Br; ++r) {
-        Lfrcp[r] = 1.0 / Lf[r];
+        Lfrcp[r] = (Lf[r] == 0.0) ? 0.0 : (1.0 / Lf[r]);
     }
 
     [[unroll]] for (uint32_t d = 0; d < HSV_per_thread / 4; ++d) {

ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm1.comp

Lines changed: 1 addition & 1 deletion
@@ -380,7 +380,7 @@ void main() {
 
     float Lfrcp[rows_per_thread];
    [[unroll]] for (uint32_t r = 0; r < rows_per_thread; ++r) {
-        Lfrcp[r] = 1.0 / Lf[r];
+        Lfrcp[r] = (Lf[r] == 0.0) ? 0.0 : (1.0 / Lf[r]);
     }
 
     [[unroll]] for (uint32_t d = 0; d < HSV_per_thread / 4; ++d) {

ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp

Lines changed: 5 additions & 1 deletion
@@ -121,7 +121,11 @@ void main() {
     const float NEG_FLT_MAX_OVER_2 = uintBitsToFloat(0xFEFFFFFF);
 
     L = coopmat<ACC_TYPE, gl_ScopeWorkgroup, Br, Bc, gl_MatrixUseAccumulator>(0);
+#if defined(ACC_TYPE_MAX)
+    M = coopmat<ACC_TYPE, gl_ScopeWorkgroup, Br, Bc, gl_MatrixUseAccumulator>(-ACC_TYPE_MAX / ACC_TYPE(2));
+#else
     M = coopmat<ACC_TYPE, gl_ScopeWorkgroup, Br, Bc, gl_MatrixUseAccumulator>(NEG_FLT_MAX_OVER_2);
+#endif
 
     coopmat<ACC_TYPE, gl_ScopeWorkgroup, Br, Bc, gl_MatrixUseAccumulator> slopeMat = coopmat<ACC_TYPE, gl_ScopeWorkgroup, Br, Bc, gl_MatrixUseAccumulator>(1.0);
 
@@ -294,7 +298,7 @@ void main() {
 
     [[unroll]]
     for (int k = 0; k < Ldiag.length(); ++k) {
-        Ldiag[k] = ACC_TYPE(1.0) / Ldiag[k];
+        Ldiag[k] = (Ldiag[k] == 0.0) ? ACC_TYPE(0.0) : (ACC_TYPE(1.0) / Ldiag[k]);
     }
 
     O = Ldiag*O;
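
The first cm2 hunk changes how the running row maximum M is initialized: NEG_FLT_MAX_OVER_2 is a float32-scale sentinel, and when ACC_TYPE_MAX is defined (presumably for builds where the accumulator type is narrower than float32, such as 16-bit floats) that constant is out of range, so -ACC_TYPE_MAX / 2 is used instead. A rough numpy illustration of why a float32-range sentinel cannot serve as a float16 initial value:

import numpy as np

# reinterpret the bit pattern 0xFEFFFFFF as a float32, like uintBitsToFloat() in the shader
neg_flt_max_over_2 = np.array([0xFEFFFFFF], dtype=np.uint32).view(np.float32)[0]
print(neg_flt_max_over_2)                           # ~ -1.7e38, roughly -FLT_MAX / 2

with np.errstate(over="ignore"):
    print(np.float16(neg_flt_max_over_2))           # -inf: the sentinel overflows float16
print(np.float16(-np.finfo(np.float16).max / 2))    # -32752.0: the -ACC_TYPE_MAX / 2 analogue stays finite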

ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_split_k_reduce.comp

Lines changed: 1 addition & 1 deletion
@@ -91,7 +91,7 @@ void main() {
         L = L*ms + vs;
     }
 
-    L = 1.0 / L;
+    L = (L == 0.0) ? 0.0 : 1.0 / L;
 
     // D dimension is split across workgroups in the y dimension
     uint d = tid + gl_WorkGroupID.y * BLOCK_SIZE;
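
All four shader hunks add the same guard on the reciprocal of the softmax row sum L: for a fully masked query row L is 0, a plain 1.0 / L yields inf, and that inf turns into NaN as soon as it multiplies the (also zero) output accumulator; returning 0 for the reciprocal leaves the row at zero instead. A small numpy sketch of the failure mode and the guarded form:

import numpy as np

L = np.array([2.5, 0.0], dtype=np.float32)                 # row sums; second row is fully masked
O = np.array([[1.0, 2.0], [0.0, 0.0]], dtype=np.float32)   # accumulated, unnormalized outputs

with np.errstate(divide="ignore", invalid="ignore"):
    naive = (1.0 / L)[:, None] * O                         # inf * 0 -> nan in the masked row

guarded = np.where(L == 0.0, 0.0, 1.0 / np.where(L == 0.0, 1.0, L))[:, None] * O

print(naive)    # [[0.4 0.8] [nan nan]]
print(guarded)  # [[0.4 0.8] [0.  0. ]]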

gguf-py/gguf/constants.py

Lines changed: 33 additions & 0 deletions
@@ -102,6 +102,8 @@ class LLM:
         EXPERT_COUNT            = "{arch}.expert_count"
         EXPERT_USED_COUNT       = "{arch}.expert_used_count"
         EXPERT_SHARED_COUNT     = "{arch}.expert_shared_count"
+        EXPERT_GROUP_COUNT      = "{arch}.expert_group_count"
+        EXPERT_GROUP_USED_COUNT = "{arch}.expert_group_used_count"
         EXPERT_WEIGHTS_SCALE    = "{arch}.expert_weights_scale"
         EXPERT_WEIGHTS_NORM     = "{arch}.expert_weights_norm"
         EXPERT_GATING_FUNC      = "{arch}.expert_gating_func"
@@ -400,6 +402,7 @@ class MODEL_ARCH(IntEnum):
     WAVTOKENIZER_DEC = auto()
     PLM              = auto()
     BAILINGMOE       = auto()
+    BAILINGMOE2      = auto()
     DOTS1            = auto()
     ARCEE            = auto()
     ERNIE4_5         = auto()
@@ -744,6 +747,7 @@ class MODEL_TENSOR(IntEnum):
     MODEL_ARCH.WAVTOKENIZER_DEC: "wavtokenizer-dec",
     MODEL_ARCH.PLM:              "plm",
     MODEL_ARCH.BAILINGMOE:       "bailingmoe",
+    MODEL_ARCH.BAILINGMOE2:      "bailingmoe2",
     MODEL_ARCH.DOTS1:            "dots1",
     MODEL_ARCH.ARCEE:            "arcee",
     MODEL_ARCH.ERNIE4_5:         "ernie4_5",
@@ -2533,6 +2537,35 @@ class MODEL_TENSOR(IntEnum):
         MODEL_TENSOR.FFN_DOWN_SHEXP,
         MODEL_TENSOR.FFN_UP_SHEXP,
     ],
+    MODEL_ARCH.BAILINGMOE2: [
+        MODEL_TENSOR.TOKEN_EMBD,
+        MODEL_TENSOR.OUTPUT_NORM,
+        MODEL_TENSOR.OUTPUT,
+        MODEL_TENSOR.ATTN_NORM,
+        MODEL_TENSOR.ATTN_Q_NORM,
+        MODEL_TENSOR.ATTN_K_NORM,
+        MODEL_TENSOR.ATTN_QKV,
+        MODEL_TENSOR.ATTN_OUT,
+        MODEL_TENSOR.FFN_GATE_INP,
+        MODEL_TENSOR.FFN_EXP_PROBS_B,
+        MODEL_TENSOR.FFN_NORM,
+        MODEL_TENSOR.FFN_GATE,
+        MODEL_TENSOR.FFN_DOWN,
+        MODEL_TENSOR.FFN_UP,
+        MODEL_TENSOR.FFN_GATE_EXP,
+        MODEL_TENSOR.FFN_DOWN_EXP,
+        MODEL_TENSOR.FFN_UP_EXP,
+        MODEL_TENSOR.FFN_GATE_SHEXP,
+        MODEL_TENSOR.FFN_DOWN_SHEXP,
+        MODEL_TENSOR.FFN_UP_SHEXP,
+        MODEL_TENSOR.NEXTN_EH_PROJ,
+        MODEL_TENSOR.NEXTN_EMBED_TOKENS,
+        MODEL_TENSOR.NEXTN_ENORM,
+        MODEL_TENSOR.NEXTN_HNORM,
+        MODEL_TENSOR.NEXTN_SHARED_HEAD_HEAD,
+        MODEL_TENSOR.NEXTN_SHARED_HEAD_NORM,
+        MODEL_TENSOR.LAYER_OUT_NORM,
+    ],
     MODEL_ARCH.DOTS1: [
         MODEL_TENSOR.TOKEN_EMBD,
         MODEL_TENSOR.OUTPUT_NORM,
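
The two new keys follow the existing "{arch}." templating, so for the architecture registered above they resolve to per-model key names; presumably the matching GGUFWriter.add_expert_group_count / add_expert_group_used_count helpers (not shown in this excerpt) write exactly these strings. A quick check of the resolved names:

EXPERT_GROUP_COUNT      = "{arch}.expert_group_count"
EXPERT_GROUP_USED_COUNT = "{arch}.expert_group_used_count"

arch = "bailingmoe2"
print(EXPERT_GROUP_COUNT.format(arch=arch))       # bailingmoe2.expert_group_count
print(EXPERT_GROUP_USED_COUNT.format(arch=arch))  # bailingmoe2.expert_group_used_count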
