
Commit 95de3c6

[Code] Fix editorconfig errors
1 parent 9d47a39 commit 95de3c6

4 files changed: +12 −15 lines changed


convert_hf_to_gguf.py

Lines changed: 1 addition & 4 deletions
@@ -4414,8 +4414,7 @@ def prepare_tensors(self):
         # flatten `list[dict[str, Tensor]]` into `list[str]`
         experts = [k for d in self._experts for k in d.keys()]
         if len(experts) > 0:
-            raise ValueError(f"Unprocessed experts: {experts}")
-
+            raise ValueError(f"Unprocessed experts: {experts}")
 
 @Model.register("PLMForCausalLM")
 class PLMModel(Model):
@@ -4426,7 +4425,6 @@ def set_vocab(self):
     def set_gguf_parameters(self):
         super().set_gguf_parameters()
         hparams = self.hparams
-
         self.gguf_writer.add_vocab_size(hparams["vocab_size"])
         self.gguf_writer.add_kv_lora_rank(hparams["kv_lora_rank"])
         self.gguf_writer.add_key_length(hparams["qk_nope_head_dim"] + hparams["qk_rope_head_dim"])
@@ -4439,7 +4437,6 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter
     def prepare_tensors(self):
         super().prepare_tensors()
 
-
 @Model.register("T5WithLMHeadModel")
 @Model.register("T5ForConditionalGeneration")
 @Model.register("MT5ForConditionalGeneration")

gguf-py/gguf/constants.py

Lines changed: 9 additions & 9 deletions
@@ -1466,18 +1466,18 @@ class MODEL_TENSOR(IntEnum):
         MODEL_TENSOR.FFN_EXP_PROBS_B,
     ],
     MODEL_ARCH.PLM: [
-        MODEL_TENSOR.TOKEN_EMBD,
+        MODEL_TENSOR.TOKEN_EMBD,
         MODEL_TENSOR.OUTPUT,
-        MODEL_TENSOR.OUTPUT_NORM,
-        MODEL_TENSOR.ATTN_NORM,
+        MODEL_TENSOR.OUTPUT_NORM,
+        MODEL_TENSOR.ATTN_NORM,
         MODEL_TENSOR.ATTN_Q,
         MODEL_TENSOR.ATTN_KV_A_MQA,
-        MODEL_TENSOR.ATTN_KV_A_NORM,
-        MODEL_TENSOR.ATTN_KV_B,
-        MODEL_TENSOR.ATTN_OUT,
-        MODEL_TENSOR.FFN_NORM,
-        MODEL_TENSOR.FFN_UP,
-        MODEL_TENSOR.FFN_DOWN,
+        MODEL_TENSOR.ATTN_KV_A_NORM,
+        MODEL_TENSOR.ATTN_KV_B,
+        MODEL_TENSOR.ATTN_OUT,
+        MODEL_TENSOR.FFN_NORM,
+        MODEL_TENSOR.FFN_UP,
+        MODEL_TENSOR.FFN_DOWN,
     ],
     MODEL_ARCH.CHATGLM : [
         MODEL_TENSOR.TOKEN_EMBD,
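For context, the re-indented entries above belong to the table that maps each model architecture to the tensors it may contain. A much-reduced sketch of that shape, with illustrative enum members and dict name only (the real definitions in gguf-py/gguf/constants.py are far larger):

from enum import IntEnum, auto

class MODEL_ARCH(IntEnum):
    PLM = auto()

class MODEL_TENSOR(IntEnum):
    TOKEN_EMBD = auto()
    OUTPUT = auto()
    OUTPUT_NORM = auto()
    ATTN_NORM = auto()
    ATTN_Q = auto()
    ATTN_KV_A_MQA = auto()
    ATTN_KV_A_NORM = auto()
    ATTN_KV_B = auto()
    ATTN_OUT = auto()
    FFN_NORM = auto()
    FFN_UP = auto()
    FFN_DOWN = auto()

# Architecture -> tensors that may appear in a converted model of that architecture.
MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
    MODEL_ARCH.PLM: [
        MODEL_TENSOR.TOKEN_EMBD,
        MODEL_TENSOR.OUTPUT,
        MODEL_TENSOR.OUTPUT_NORM,
        MODEL_TENSOR.ATTN_NORM,
        MODEL_TENSOR.ATTN_Q,
        MODEL_TENSOR.ATTN_KV_A_MQA,
        MODEL_TENSOR.ATTN_KV_A_NORM,
        MODEL_TENSOR.ATTN_KV_B,
        MODEL_TENSOR.ATTN_OUT,
        MODEL_TENSOR.FFN_NORM,
        MODEL_TENSOR.FFN_UP,
        MODEL_TENSOR.FFN_DOWN,
    ],
}

Keeping every entry of such a table on the same indentation is presumably what the whitespace-only changes in this hunk restore.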

src/llama-model.cpp

Lines changed: 1 addition & 1 deletion
@@ -1133,7 +1133,7 @@ void llama_model::load_hparams(llama_model_loader & ml) {
                 }
             } break;
         case LLM_ARCH_PLM:
-            {
+            {
                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
                 ml.get_key(LLM_KV_ATTENTION_KV_LORA_RANK, hparams.n_lora_kv);
                 switch (hparams.n_layer) {

src/llama.cpp

Lines changed: 1 addition & 1 deletion
@@ -337,4 +337,4 @@ const char * llama_print_system_info(void) {
     }
 
     return s.c_str();
-}
+}
