@@ -856,20 +856,34 @@ void llama_model::load_hparams(llama_model_loader & ml) {
                 // for backward compatibility ; see: https://github.com/ggerganov/llama.cpp/pull/8931
                 if ((hparams.n_layer == 32 || hparams.n_layer == 40) && hparams.n_ctx_train == 4096) {
                     // default value for Phi-3-mini-4k-instruct and Phi-3-medium-4k-instruct
+                    LLAMA_LOG_WARN("%s: assuming n_swa = 2047 for Phi-3-mini-4k-instruct and Phi-3-medium-4k-instruct\n", __func__);
+
                     hparams.n_swa = 2047;
                 } else if (hparams.n_layer == 32 && hparams.n_head_kv(0) == 32 && hparams.n_ctx_train == 131072) {
                     // default value for Phi-3-mini-128k-instruct
-                    // note: this seems incorrect because the window is bigger than the train context?
-                    hparams.n_swa = 262144;
+                    LLAMA_LOG_WARN("%s: assuming n_swa = n_ctx_train for Phi-3-mini-128k-instruct\n", __func__);
+
+                    hparams.n_swa = hparams.n_ctx_train;
+                    hparams.n_swa_pattern = 1;
                 } else if (hparams.n_layer == 40 && hparams.n_ctx_train == 131072) {
                     // default value for Phi-3-medium-128k-instruct
-                    // note: this seems incorrect because the window is equal to the train context?
-                    hparams.n_swa = 131072;
+                    LLAMA_LOG_WARN("%s: assuming n_swa = n_ctx_train for Phi-3-medium-128k-instruct\n", __func__);
+
+                    hparams.n_swa = hparams.n_ctx_train;
+                    hparams.n_swa_pattern = 1;
                 }
+
                 bool found_swa = ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false);
                 if (!found_swa && hparams.n_swa == 0) {
                     throw std::runtime_error("invalid value for sliding_window");
                 }
+
+                if (hparams.n_swa > hparams.n_ctx_train) {
+                    LLAMA_LOG_WARN("%s: unexpected n_swa: %d > %d, setting to n_ctx_train and disabling SWA\n", __func__, hparams.n_swa, hparams.n_ctx_train);
+
+                    hparams.n_swa = hparams.n_ctx_train;
+                    hparams.n_swa_pattern = 1;
+                }
             } break;
         case LLM_ARCH_PHIMOE:
             {
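
Read as a whole, the hunk replaces the old hard-coded 262144/131072 defaults with `n_ctx_train`, logs which assumption was taken, and finally clamps any declared window that exceeds the training context. Below is a minimal standalone sketch of that fallback order, assuming the surrounding llama.cpp context: the `phi3_hparams` struct and `resolve_phi3_swa` helper are illustrative stand-ins rather than library API, and the reading of `n_swa_pattern = 1` as "no layer uses SWA" is inferred from the diff.

```cpp
#include <cstdint>
#include <optional>
#include <stdexcept>

// Illustrative stand-in for the hparams fields touched by the hunk above.
struct phi3_hparams {
    uint32_t n_layer       = 0;
    uint32_t n_head_kv_0   = 0;   // corresponds to hparams.n_head_kv(0)
    uint32_t n_ctx_train   = 0;
    uint32_t n_swa         = 0;   // sliding-window size; 0 = unset
    uint32_t n_swa_pattern = 1;   // assumed: 1 means SWA is effectively disabled
};

// Sketch of the fallback order; `swa_from_gguf` stands in for the optional
// LLM_KV_ATTENTION_SLIDING_WINDOW value that ml.get_key(..., false) may read.
void resolve_phi3_swa(phi3_hparams & hp, std::optional<uint32_t> swa_from_gguf) {
    // 1. model-specific defaults kept for backward compatibility (see the PR
    //    referenced in the comment above) when the metadata key is missing
    if ((hp.n_layer == 32 || hp.n_layer == 40) && hp.n_ctx_train == 4096) {
        hp.n_swa = 2047;                          // Phi-3-{mini,medium}-4k-instruct
    } else if (hp.n_layer == 32 && hp.n_head_kv_0 == 32 && hp.n_ctx_train == 131072) {
        hp.n_swa         = hp.n_ctx_train;        // Phi-3-mini-128k-instruct
        hp.n_swa_pattern = 1;
    } else if (hp.n_layer == 40 && hp.n_ctx_train == 131072) {
        hp.n_swa         = hp.n_ctx_train;        // Phi-3-medium-128k-instruct
        hp.n_swa_pattern = 1;
    }

    // 2. a sliding_window key in the metadata overrides the default;
    //    with neither a key nor a default, loading fails
    if (swa_from_gguf) {
        hp.n_swa = *swa_from_gguf;
    } else if (hp.n_swa == 0) {
        throw std::runtime_error("invalid value for sliding_window");
    }

    // 3. a window larger than the training context is clamped and treated as "no SWA"
    if (hp.n_swa > hp.n_ctx_train) {
        hp.n_swa         = hp.n_ctx_train;
        hp.n_swa_pattern = 1;
    }
}
```

The final clamp mirrors the block added at the end of the hunk: metadata that declares a window larger than the training context no longer keeps the oversized value silently, but is reduced to `n_ctx_train` with the pattern reset.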