@@ -775,27 +775,6 @@ void llama_model::load_hparams(llama_model_loader & ml) {
             ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn);
             ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type, false);
 
-                switch (hparams.n_layer) {
-                    case 12:
-                        type = LLM_TYPE_47M; break; // granite-embeddings-small
-                    default: type = LLM_TYPE_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_MODERN_BERT:
-            {
-
-                hparams.swa_type = LLAMA_SWA_TYPE_LOCAL;
-
-                hparams.set_swa_pattern(3, 0);
-                hparams.rope_freq_base_train_swa = 10000.f;
-                hparams.rope_freq_base_train = 160000.f;
-                hparams.n_swa = 128;
-
-                ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa);
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
-                ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn);
-                ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type, false);
-
                 switch (hparams.n_layer) {
                     case 12:
                         type = LLM_TYPE_47M; break; // granite-embeddings-small
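
For readability, this is how the consolidated `LLM_ARCH_MODERN_BERT` hparams case reads once the duplicated block above is removed. It is a sketch assembled from the hunk itself, not a verbatim excerpt of the file; surrounding cases are omitted and the exact indentation is approximate.

```cpp
// consolidated case, assembled from the removed duplicate above (sketch only)
case LLM_ARCH_MODERN_BERT:
    {
        // defaults for local sliding-window attention; the ml.get_key() calls
        // below fill/override them from the model's GGUF metadata
        hparams.swa_type = LLAMA_SWA_TYPE_LOCAL;
        hparams.set_swa_pattern(3, 0);
        hparams.rope_freq_base_train_swa = 10000.f;
        hparams.rope_freq_base_train     = 160000.f;
        hparams.n_swa = 128;

        ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa);
        ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,  hparams.f_norm_eps);
        ml.get_key(LLM_KV_ATTENTION_CAUSAL,         hparams.causal_attn);
        ml.get_key(LLM_KV_POOLING_TYPE,             hparams.pooling_type, false);

        switch (hparams.n_layer) {
            case 12:
                type = LLM_TYPE_47M; break; // granite-embeddings-small
            default: type = LLM_TYPE_UNKNOWN;
        }
    } break;
```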
@@ -7792,7 +7771,7 @@ struct llm_build_modern_bert : public llm_graph_context {
         inpL = build_norm(inpL, model.tok_norm, nullptr, LLM_NORM, -1);
         cb(inpL, "inp_norm", -1);
 
-        auto * inp_attn = build_attn_inp_kv_unified_iswa ();
+        auto * inp_attn = build_attn_inp_kv_iswa();
 
         // iterate layers
         for (int il = 0; il < n_layer; ++il) {
@@ -7842,8 +7821,8 @@ struct llm_build_modern_bert : public llm_graph_context {
             cb(Vcur, "Vcur", il);
 
             cur = build_attn(inp_attn,
-                    model.layers[il].wo, nullptr,
-                    Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
+                    model.layers[il].wo, nullptr,
+                    Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
             cb(cur, "kqv_out", il);
 
             if (il == n_layer - 1 && pooling_type == LLAMA_POOLING_TYPE_NONE) {
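
Taken together with the earlier `build_attn_inp_kv_iswa()` hunk, the attention path of the graph builder ends up as sketched below. The reading that the third `nullptr` fills an additional optional tensor argument in the widened `build_attn` signature is an assumption based on this diff alone, not on the surrounding file.

```cpp
// sketch of the updated attention path in llm_build_modern_bert, combining the
// two graph-builder hunks above; not a verbatim excerpt
auto * inp_attn = build_attn_inp_kv_iswa(); // previously build_attn_inp_kv_unified_iswa()

for (int il = 0; il < n_layer; ++il) {
    // ... Qcur, Kcur, Vcur are built as in the unchanged context lines ...
    cur = build_attn(inp_attn,
            model.layers[il].wo, nullptr,       // attention output projection, no bias
            Qcur, Kcur, Vcur,
            nullptr, nullptr, nullptr,          // optional extra tensors left unset (assumed mapping)
            1.0f/sqrtf(float(n_embd_head)), il);
    cb(cur, "kqv_out", il);
}
```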