
Commit e4145c0

ikawrakow and Iwan Kawrakow authored
Add support for SmolLM3 (#934)
* Convert from HF
* Model loading and compute graph

---------

Co-authored-by: Iwan Kawrakow <[email protected]>
1 parent a313b71 commit e4145c0

File tree

10 files changed, +199 −10 lines changed


convert_hf_to_gguf.py

Lines changed: 15 additions & 0 deletions

@@ -4182,6 +4182,21 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None):
         return super().modify_tensors(data_torch, name, bid)
 
 
+@Model.register("SmolLM3ForCausalLM")
+class SmolLM3Model(LlamaModel):
+    model_arch = gguf.MODEL_ARCH.SMOLLM3
+
+    def set_vocab(self):
+        super().set_vocab()
+        # remove unsupported array slicing in chat template
+        # ref: https://huggingface.co/ggml-org/SmolLM3-3B-GGUF/discussions/1
+        from transformers import AutoTokenizer
+        tokenizer = AutoTokenizer.from_pretrained(self.dir_model)
+        if tokenizer.chat_template is not None:
+            chat_template = tokenizer.chat_template.replace("[:]", "")
+            self.gguf_writer.add_chat_template(chat_template)
+
+
 @Model.register("Dots1ForCausalLM")
 class Dots1Model(Qwen2MoeModel):
     model_arch = gguf.MODEL_ARCH.DOTS1
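Note: the `set_vocab` override above exists because the upstream chat template uses Jinja full-slice syntax (`[:]`), which the template engine used at inference time does not support (see the linked discussion). A standalone sketch of the same cleanup; the checkpoint id here is an illustrative assumption, the converter itself works on `self.dir_model`:

from transformers import AutoTokenizer

# Hypothetical model id for illustration only.
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/SmolLM3-3B")

if tokenizer.chat_template is not None:
    # Strip the unsupported full-slice syntax, e.g. `tools[:]` -> `tools`.
    fixed = tokenizer.chat_template.replace("[:]", "")
    assert "[:]" not in fixed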

gguf-py/gguf/constants.py

Lines changed: 19 additions & 1 deletion

@@ -249,7 +249,8 @@ class MODEL_ARCH(IntEnum):
     ERNIE4_5_MOE = auto()
     BAILINGMOE2  = auto()
     MINIMAXM2    = auto()
-
+    SMOLLM3      = auto()
+
 class MODEL_TENSOR(IntEnum):
     TOKEN_EMBD      = auto()
     TOKEN_EMBD_NORM = auto()

@@ -396,6 +397,7 @@ class MODEL_TENSOR(IntEnum):
     MODEL_ARCH.ERNIE4_5_MOE: "ernie4_5-moe",
     MODEL_ARCH.BAILINGMOE2:  "bailingmoe2",
     MODEL_ARCH.MINIMAXM2:    "minimax-m2",
+    MODEL_ARCH.SMOLLM3:      "smollm3",
 }
 
 TENSOR_NAMES: dict[MODEL_TENSOR, str] = {

@@ -1344,6 +1346,22 @@ class MODEL_TENSOR(IntEnum):
         MODEL_TENSOR.FFN_UP_EXP,
         MODEL_TENSOR.FFN_EXP_PROBS_B,
     ],
+    MODEL_ARCH.SMOLLM3: [
+        MODEL_TENSOR.TOKEN_EMBD,
+        MODEL_TENSOR.OUTPUT_NORM,
+        MODEL_TENSOR.OUTPUT,
+        MODEL_TENSOR.ROPE_FREQS,
+        MODEL_TENSOR.ATTN_NORM,
+        MODEL_TENSOR.ATTN_Q,
+        MODEL_TENSOR.ATTN_K,
+        MODEL_TENSOR.ATTN_V,
+        MODEL_TENSOR.ATTN_OUT,
+        MODEL_TENSOR.ATTN_ROT_EMBD,
+        MODEL_TENSOR.FFN_NORM,
+        MODEL_TENSOR.FFN_GATE,
+        MODEL_TENSOR.FFN_DOWN,
+        MODEL_TENSOR.FFN_UP,
+    ],
     # TODO
 }
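Note: as a quick sanity check of the registration, with a gguf-py checkout that includes this commit the new enum member, its arch string, and its tensor list line up as follows (a minimal sketch, not part of the diff):

from gguf.constants import MODEL_ARCH, MODEL_ARCH_NAMES, MODEL_TENSOR, MODEL_TENSORS

# The new enum member maps to the "smollm3" arch string written into GGUF metadata...
assert MODEL_ARCH_NAMES[MODEL_ARCH.SMOLLM3] == "smollm3"

# ...and its tensor set is the dense Llama-style layout (no MoE tensors).
assert MODEL_TENSOR.FFN_GATE in MODEL_TENSORS[MODEL_ARCH.SMOLLM3]
assert MODEL_TENSOR.FFN_GATE_EXP not in MODEL_TENSORS[MODEL_ARCH.SMOLLM3]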

src/llama-arch.cpp

Lines changed: 1 addition & 0 deletions

@@ -67,6 +67,7 @@ static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
     { LLM_ARCH_OPENAI_MOE,  "gpt-oss"     },
     { LLM_ARCH_BAILINGMOE2, "bailingmoe2" },
     { LLM_ARCH_MINIMAX_M2,  "minimax-m2"  },
+    { LLM_ARCH_SMOLLM3,     "smollm3"     },
     { LLM_ARCH_UNKNOWN,     "(unknown)"   },
 };

src/llama-arch.h

Lines changed: 1 addition & 0 deletions

@@ -66,6 +66,7 @@ enum llm_arch {
     LLM_ARCH_OPENAI_MOE,
     LLM_ARCH_BAILINGMOE2,
     LLM_ARCH_MINIMAX_M2,
+    LLM_ARCH_SMOLLM3,
     LLM_ARCH_UNKNOWN,
 };

src/llama-build-context.cpp

Lines changed: 98 additions & 0 deletions

@@ -8489,6 +8489,100 @@ ggml_cgraph* llm_build_context::build_minimaxm2() {
     return gf;
 }
 
+ggml_cgraph* llm_build_context::build_smollm3() {
+    ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+
+    const int64_t n_embd_head = hparams.n_embd_head_v;
+    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
+    // GGML_ASSERT(n_embd_head == hparams.n_rot); this is wrong in case of minimax, head_dim = 128, n_rot = 64
+
+    ggml_tensor * cur;
+    ggml_tensor * inpL;
+
+    inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
+
+    ggml_tensor * inp_pos = build_inp_pos();
+
+    //auto * inp_attn = build_attn_inp_kv();
+    ggml_tensor * inp_out_ids = build_inp_out_ids();
+    ggml_tensor * KQ_mask = build_inp_KQ_mask();
+
+    const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale;
+
+    for (int il = 0; il < n_layer; ++il) {
+        ggml_tensor * inpSA = inpL;
+
+        const bool use_rope = (il + 1) % hparams.n_no_rope_layer_step != 0;
+
+        // norm
+        cur = llm_build_norm(ctx0, inpL, hparams, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, cb, il);
+        cb(cur, "attn_norm", il);
+
+        // self-attention
+        {
+            auto [Qcur, Kcur, Vcur] = llm_build_mul_mat_qkv(gf, cur,
+                    model.layers[il].wqkv, model.layers[il].bqkv,
+                    model.layers[il].wqk,  model.layers[il].bqk,
+                    model.layers[il].wq,   model.layers[il].bq,
+                    model.layers[il].wk,   model.layers[il].bk,
+                    model.layers[il].wv,   model.layers[il].bv,
+                    model.layers[il].attn_q_norm, model.layers[il].attn_k_norm, 0, il);
+
+            if (use_rope) {
+                Qcur = ggml_rope_ext(ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+                        ext_factor, attn_factor, beta_fast, beta_slow);
+                cb(Qcur, "Qcur", il);
+
+                Kcur = ggml_rope_ext(ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+                        ext_factor, attn_factor, beta_fast, beta_slow);
+                cb(Kcur, "Kcur", il);
+            }
+
+            cur = llm_build_kv(ctx0, lctx, kv_self, gf,
+                    model.layers[il].wo, model.layers[il].bo,
+                    Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, kq_scale, cb, il);
+            cb(cur, "attn_out", il);
+        }
+
+        if (il == n_layer - 1 && inp_out_ids) {
+            cur   = ggml_get_rows(ctx0, cur,   inp_out_ids);
+            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
+        }
+
+        ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
+        cb(ffn_inp, "ffn_inp", il);
+
+        // feed-forward network
+        cur = llm_build_norm(ctx0, ffn_inp, hparams, model.layers[il].ffn_norm, NULL, LLM_NORM_RMS, cb, il);
+        cb(cur, "ffn_norm", il);
+
+        cur = llm_build_ffn(ctx0, lctx, cur,
+                model.layers[il].ffn_up,   NULL, NULL,
+                model.layers[il].ffn_gate, NULL, NULL,
+                model.layers[il].ffn_down, NULL, NULL,
+                NULL,
+                LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
+        cb(cur, "ffn_out", il);
+
+        cur = ggml_add(ctx0, cur, ffn_inp);
+        cur = lctx.cvec.apply_to(ctx0, cur, il);
+        cb(cur, "l_out", il);
+
+        // input for next layer
+        inpL = cur;
+    }
+
+    cur = inpL;
+
+    cur = llm_build_norm(ctx0, cur, hparams, model.output_norm, NULL, LLM_NORM_RMS, cb, -1);
+    cb(cur, "result_norm", -1);
+
+    // lm_head
+    cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
+    cb(cur, "result_output", -1);
+
+    ggml_build_forward_expand(gf, cur);
+
+    return gf;
+}
+
 ggml_cgraph * llm_build_context::llama_build_graph_defrag(llama_context & lctx, const std::vector<uint32_t> & ids) {
     llama_batch dummy;
     dummy.n_tokens = 0;

@@ -8839,6 +8933,10 @@ ggml_cgraph * llm_build_context::llama_build_graph(
             {
                 result = llm.build_minimaxm2();
             } break;
+        case LLM_ARCH_SMOLLM3:
+            {
+                result = llm.build_smollm3();
+            } break;
         default:
             GGML_ABORT("fatal error");
     }
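Note: the only SmolLM3-specific wrinkle in this graph is the `use_rope` flag. With `n_no_rope_layer_step` hardcoded to 4 (in src/llama-hparams.cpp further down), every fourth layer skips rotary position embeddings entirely, i.e. NoPE layers. A small sketch of the schedule, mirroring the expression above (illustrative only):

def rope_schedule(n_layer: int, no_rope_step: int = 4) -> list[bool]:
    # Mirrors `use_rope = (il + 1) % hparams.n_no_rope_layer_step != 0`:
    # layers 3, 7, 11, ... (0-based) come out False, i.e. NoPE layers.
    return [(il + 1) % no_rope_step != 0 for il in range(n_layer)]

# SmolLM3-3B has 36 layers, so 9 of them run without RoPE.
assert rope_schedule(36).count(False) == 9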

src/llama-build-context.h

Lines changed: 2 additions & 0 deletions

@@ -270,6 +270,8 @@ struct llm_build_context {
 
     ggml_cgraph * build_minimaxm2();
 
+    ggml_cgraph * build_smollm3();
+
     //
     static ggml_tensor * llm_build_lora_mm(llama_context & lctx, ggml_context * ctx0,
             ggml_tensor * w, ggml_tensor * cur);

src/llama-hparams.cpp

Lines changed: 19 additions & 9 deletions

@@ -1013,16 +1013,26 @@ void llm_load_hparams(
 
             } break;
         case LLM_ARCH_MINIMAX_M2:
-           {
-               ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-               ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp);
-               ml.get_key(LLM_KV_EXPERT_GATING_FUNC, hparams.expert_gating_func, false);
+            {
+                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp);
+                ml.get_key(LLM_KV_EXPERT_GATING_FUNC, hparams.expert_gating_func, false);
 
-               switch (hparams.n_layer) {
-                   case 62: model.type = e_model::MODEL_230B_A10B; break;
-                   default: model.type = e_model::MODEL_UNKNOWN;
-               }
-           } break;
+                switch (hparams.n_layer) {
+                    case 62: model.type = e_model::MODEL_230B_A10B; break;
+                    default: model.type = e_model::MODEL_UNKNOWN;
+                }
+            } break;
+        case LLM_ARCH_SMOLLM3:
+            {
+                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+                hparams.n_no_rope_layer_step = 4;
+
+                switch (hparams.n_layer) {
+                    case 36: model.type = e_model::MODEL_3B; break;
+                    default: model.type = e_model::MODEL_UNKNOWN;
+                }
+            } break;
         default: (void)0;
     }

src/llama-load-tensors.cpp

Lines changed: 26 additions & 0 deletions

@@ -130,6 +130,8 @@ struct create_tensors_helper : public create_tensors_helper_interface {
 
     bool create_minimaxm2_tensors(const LLM_TN & tn);
 
+    bool create_smollm3_tensors(const LLM_TN & tn);
+
     llama_model_loader & ml;
     llama_model & model;

@@ -2466,6 +2468,28 @@ bool create_tensors_helper::create_minimaxm2_tensors(const LLM_TN & tn) {
     return use_mmap_buffer;
 }
 
+bool create_tensors_helper::create_smollm3_tensors(const LLM_TN & tn) {
+    LOADING_PRELUDE
+
+    create_embd_output(tn, n_embd, n_vocab);
+
+    for (int i = 0; i < n_layer; ++i) {
+        ggml_context* ctx_layer = ctx_for_layer(i);
+        ggml_context* ctx_split = ctx_for_layer_split(i);
+
+        auto & layer = model.layers[i];
+
+        layer.attn_norm = create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0);
+
+        use_mmap_buffer &= !merge_qkv(tn, i, 0);
+
+        layer.wo = create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd_head_k * n_head, n_embd }, 0);
+
+        layer.ffn_norm = create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), { n_embd }, 0);
+
+        create_std_ffn(i, tn, layer, n_ff, n_embd, ctx_split);
+    }
+    return use_mmap_buffer;
+}
+
 bool create_tensors_helper::merge_qkv(const LLM_TN & tn, int i, int bias) {
     auto& hparams = model.hparams;
     const int64_t n_head = hparams.n_head();

@@ -2699,6 +2723,8 @@ bool create_tensors_helper::create_tensors() {
             use_mmap_buffer = create_bailingmoe2_tensors(tn); break;
         case LLM_ARCH_MINIMAX_M2:
             use_mmap_buffer = create_minimaxm2_tensors(tn); break;
+        case LLM_ARCH_SMOLLM3:
+            use_mmap_buffer = create_smollm3_tensors(tn); break;
         default:
             throw std::runtime_error("unknown architecture");
     }
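Note: the loader reuses the existing `merge_qkv` helper instead of creating separate `wq`/`wk`/`wv` tensors, so the three projections can run as a single matmul. A toy numpy sketch of why fusing is equivalent; the 2048/512 widths assume SmolLM3-3B's 16 query heads and 4 KV heads of dimension 128 (taken from the HF config, treat as illustrative):

import numpy as np

rng = np.random.default_rng(0)
n_embd, n_kv = 2048, 512  # 16 heads * 128, 4 KV heads * 128 (assumed)

wq = rng.standard_normal((n_embd, n_embd), dtype=np.float32)
wk = rng.standard_normal((n_embd, n_kv),   dtype=np.float32)
wv = rng.standard_normal((n_embd, n_kv),   dtype=np.float32)
wqkv = np.concatenate([wq, wk, wv], axis=1)  # one fused projection

x = rng.standard_normal((3, n_embd), dtype=np.float32)  # 3 tokens
q, k, v = np.split(x @ wqkv, [n_embd, n_embd + n_kv], axis=1)

# Fused and separate projections agree up to float rounding.
assert np.allclose(q, x @ wq, atol=1e-2)
assert np.allclose(k, x @ wk, atol=1e-2)
assert np.allclose(v, x @ wv, atol=1e-2)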

src/llama-model.cpp

Lines changed: 17 additions & 0 deletions

@@ -1249,6 +1249,23 @@ static const std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NA
             { LLM_TENSOR_FFN_EXP_PROBS_B, "blk.%d.exp_probs_b" },
         },
     },
+    {
+        LLM_ARCH_SMOLLM3,
+        {
+            { LLM_TENSOR_TOKEN_EMBD,  "token_embd" },
+            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+            { LLM_TENSOR_OUTPUT,      "output" },
+            { LLM_TENSOR_ATTN_NORM,   "blk.%d.attn_norm" },
+            { LLM_TENSOR_ATTN_Q,      "blk.%d.attn_q" },
+            { LLM_TENSOR_ATTN_K,      "blk.%d.attn_k" },
+            { LLM_TENSOR_ATTN_V,      "blk.%d.attn_v" },
+            { LLM_TENSOR_ATTN_OUT,    "blk.%d.attn_output" },
+            { LLM_TENSOR_FFN_NORM,    "blk.%d.ffn_norm" },
+            { LLM_TENSOR_FFN_GATE,    "blk.%d.ffn_gate" },
+            { LLM_TENSOR_FFN_DOWN,    "blk.%d.ffn_down" },
+            { LLM_TENSOR_FFN_UP,      "blk.%d.ffn_up" },
+        },
+    },
     {
         LLM_ARCH_UNKNOWN,
        {
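Note: concretely, the table above means a SmolLM3 GGUF carries the following per-layer weights. A hypothetical helper, just spelling out the "blk.%d.*" naming scheme:

def smollm3_layer_tensors(i: int) -> list[str]:
    # Expands the per-layer patterns registered for LLM_ARCH_SMOLLM3 above.
    parts = ("attn_norm", "attn_q", "attn_k", "attn_v", "attn_output",
             "ffn_norm", "ffn_gate", "ffn_down", "ffn_up")
    return [f"blk.{i}.{p}.weight" for p in parts]

print(smollm3_layer_tensors(0)[:3])
# ['blk.0.attn_norm.weight', 'blk.0.attn_q.weight', 'blk.0.attn_k.weight']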

src/llama.cpp

Lines changed: 1 addition & 0 deletions

@@ -4642,6 +4642,7 @@ enum llama_rope_type llama_rope_type(const struct llama_model * model) {
         case LLM_ARCH_COHERE2:
         case LLM_ARCH_ERNIE4_5:
         case LLM_ARCH_ERNIE4_5_MOE:
+        case LLM_ARCH_SMOLLM3:
            return LLAMA_ROPE_TYPE_NORM;
 
        // the pairs of head values are offset by n_rot/2
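Note: `LLAMA_ROPE_TYPE_NORM` rotates adjacent component pairs, as opposed to the NEOX style (the trailing context comment above) that pairs components offset by `n_rot/2`. A minimal sketch of the normal style; the base frequency 10000 is illustrative, not SmolLM3's actual `rope_theta`:

import numpy as np

def rope_norm(x: np.ndarray, pos: int, base: float = 10000.0) -> np.ndarray:
    # Rotate adjacent pairs (x[0], x[1]), (x[2], x[3]), ... by position-dependent angles.
    d = x.shape[-1]
    out = x.astype(np.float64)
    for i in range(0, d, 2):
        ang = pos * base ** (-i / d)
        c, s = np.cos(ang), np.sin(ang)
        out[..., i]     = c * x[..., i] - s * x[..., i + 1]
        out[..., i + 1] = s * x[..., i] + c * x[..., i + 1]
    return out

# Position 0 is the identity rotation.
v = np.arange(8, dtype=np.float64)
assert np.allclose(rope_norm(v, 0), v)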
