1 parent d82eacf · commit 9863d02
src/llama-model.cpp
@@ -740,9 +740,9 @@ void llama_model::load_hparams(llama_model_loader & ml) {
             } break;
         case LLM_ARCH_NEO_BERT:
             {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn);
-                ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type);
+                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+                ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn);
+                ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type);

                 if (hparams.n_layer == 28) {
                     type = LLM_TYPE_250M;