@@ -2495,12 +2495,12 @@ struct llama_hparams {
     uint32_t time_decay_extra_dim = 0;
     uint32_t wkv_head_size = 0;

-    float rope_attn_factor = 1.0f;
-    float rope_freq_base_train;
-    float rope_freq_scale_train;
-    uint32_t n_ctx_orig_yarn;
-    float rope_yarn_log_mul;
-    std::array<int, 4> rope_sections;
+    float    rope_attn_factor = 1.0f;
+    float    rope_freq_base_train;
+    float    rope_freq_scale_train;
+    uint32_t n_ctx_orig_yarn;
+    float    rope_yarn_log_mul;
+    int      rope_sections[4];

     // for State Space Models
     uint32_t ssm_d_conv = 0;
@@ -5779,8 +5779,9 @@ static void llm_load_hparams(
             } break;
         case LLM_ARCH_QWEN2VL:
             {
-                std::fill(hparams.rope_sections.begin(), hparams.rope_sections.end(), 0);
-                ml.get_key_or_arr(LLM_KV_ROPE_DIMENSION_SECTIONS, hparams.rope_sections, 4, true);
+                std::array<int, 4> section_dims = {}; // zero-init, preserving the intent of the removed std::fill
+                ml.get_key_or_arr(LLM_KV_ROPE_DIMENSION_SECTIONS, section_dims, 4, true);
+                std::copy(section_dims.begin(), section_dims.end(), std::begin(hparams.rope_sections));
             }
             // fall through
         case LLM_ARCH_QWEN2:
@@ -12614,7 +12615,7 @@ struct llm_build_context {
         // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
         struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
         int sections[4];
-        std::copy(hparams.rope_sections.begin(), hparams.rope_sections.end(), sections);
+        std::copy(std::begin(hparams.rope_sections), std::end(hparams.rope_sections), sections);

         for (int il = 0; il < n_layer; ++il) {
             struct ggml_tensor * inpSA = inpL;
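
// Taken together, the hunks above switch hparams.rope_sections from a
// std::array<int, 4> to a plain C array: the loader reads the GGUF metadata
// into a temporary std::array and copies it into the member, and the graph
// builder copies it back out via std::begin/std::end, which work on raw
// arrays as well as containers.
//
// Below is a minimal standalone sketch of that pattern, NOT llama.cpp itself:
// fake_hparams, load_sections, and the section values are hypothetical
// stand-ins for llama_hparams and ml.get_key_or_arr().

#include <algorithm>
#include <array>
#include <cstdio>
#include <iterator>

struct fake_hparams {
    int rope_sections[4]; // plain C array, as in the patched llama_hparams
};

// Stand-in for the metadata loader: fills a temporary std::array.
static void load_sections(std::array<int, 4> & out) {
    out = {16, 24, 24, 0}; // made-up mrope section dims, for illustration only
}

int main() {
    fake_hparams hparams = {};

    // Loading side: read into a zero-initialized std::array, then copy
    // into the C array member.
    std::array<int, 4> section_dims = {};
    load_sections(section_dims);
    std::copy(section_dims.begin(), section_dims.end(), std::begin(hparams.rope_sections));

    // Graph-build side: std::begin/std::end on the raw array avoid any
    // hard-coded "+ 4" offsets when copying into a local buffer.
    int sections[4];
    std::copy(std::begin(hparams.rope_sections), std::end(hparams.rope_sections), sections);

    for (int s : sections) {
        printf("%d ", s);
    }
    printf("\n");
    return 0;
}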