@@ -2206,43 +2206,43 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
             case LLM_ARCH_PHIMOE:
                 {
                     const int64_t n_embd_head = n_embd / n_head;
-
+
                     tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0);
-
+
                     // output
                     output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0);
                     output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
                     output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), { n_embd, n_vocab }, 0);
                     output_b      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "bias"),   { n_vocab }, 0);
-
+
                     for (int i = 0; i < n_layer; ++i) {
                         auto & layer = layers[i];
-
+
                         layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0);
                         layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias",   i), { n_embd }, 0);
-
+
                         layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), { n_embd, n_embd + 2*n_embd_gqa }, llama_model_loader::TENSOR_NOT_REQUIRED);
                         if (layer.wqkv == nullptr) {
                             layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
                             layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias",   i), {n_embd}, 0);
-
+
                             layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
                             layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias",   i), {n_embd_gqa}, 0);
-
+
                             layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
                             layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias",   i), {n_embd_gqa}, 0);
                         }
                         layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd, n_embd }, 0);
                         layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias",   i), { n_embd }, 0);
-
+
                         layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), { n_embd }, 0);
                         layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias",   i), { n_embd }, 0);
-
+
                         layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), {n_embd, n_expert}, 0);
                         layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0);
                         layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff, n_embd, n_expert}, 0);
                         layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd, n_ff, n_expert}, 0);
-
+
                         layer.rope_long  = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG,  "weight", i), { n_embd_head/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
                         layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), { n_embd_head/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
                     }
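For reference, a minimal, self-contained sketch (not part of the patch) of how the fused-QKV and rope-factor shapes requested in this hunk follow from the grouped-query-attention hyperparameters. The concrete values for n_embd, n_head and n_head_kv below are illustrative assumptions, not values read from any GGUF header.

// Standalone sketch with assumed, illustrative hyperparameters.
#include <cstdint>
#include <cstdio>

int main() {
    const int64_t n_embd    = 4096; // assumed embedding width
    const int64_t n_head    = 32;   // assumed number of query heads
    const int64_t n_head_kv = 8;    // assumed number of KV heads (GQA)

    const int64_t n_embd_head = n_embd / n_head;         // per-head dimension
    const int64_t n_embd_gqa  = n_embd_head * n_head_kv; // width of the K and V projections

    // Fused QKV weight: Q contributes n_embd columns, K and V contribute
    // n_embd_gqa columns each, matching { n_embd, n_embd + 2*n_embd_gqa } above.
    printf("wqkv shape: {%lld, %lld}\n",
           (long long) n_embd, (long long) (n_embd + 2*n_embd_gqa));

    // rope_long / rope_short hold one factor per rotary frequency pair,
    // matching { n_embd_head/2 } above.
    printf("rope factors: {%lld}\n", (long long) (n_embd_head/2));
    return 0;
}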