@@ -2269,7 +2269,7 @@ def set_gguf_parameters(self):
                 self.gguf_writer.add_rope_scaling_orig_ctx_len(self.hparams["rope_scaling"]["original_max_position_embeddings"])
 
 
-@Model.register("Qwen2VLForConditionalGeneration")
+@Model.register("Qwen2VLForConditionalGeneration", "Qwen2_5_VLForConditionalGeneration")
 class Qwen2VLModel(Model):
     model_arch = gguf.MODEL_ARCH.QWEN2VL
 
@@ -4419,6 +4419,29 @@ def prepare_tensors(self):
                 raise ValueError(f"Unprocessed experts: {experts}")
 
 
+@Model.register("PLMForCausalLM")
+class PLMModel(Model):
+    model_arch = gguf.MODEL_ARCH.PLM
+
+    def set_vocab(self):
+        self._set_vocab_gpt2()
+
+    def set_gguf_parameters(self):
+        super().set_gguf_parameters()
+        hparams = self.hparams
+        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
+        self.gguf_writer.add_kv_lora_rank(hparams["kv_lora_rank"])
+        self.gguf_writer.add_key_length(hparams["qk_nope_head_dim"] + hparams["qk_rope_head_dim"])
+        self.gguf_writer.add_value_length(hparams["v_head_dim"])
+        self.gguf_writer.add_rope_dimension_count(hparams["qk_rope_head_dim"])
+
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        return [(self.map_tensor_name(name), data_torch)]
+
+    def prepare_tensors(self):
+        super().prepare_tensors()
+
+
 @Model.register("T5WithLMHeadModel")
 @Model.register("T5ForConditionalGeneration")
 @Model.register("MT5ForConditionalGeneration")
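
For context, the `@Model.register` decorator used in both hunks records each converter class in a registry keyed by Hugging Face architecture strings (the "architectures" entries in a checkpoint's config.json), which is why adding a second name to `Qwen2VLModel`'s registration is enough to route Qwen2.5-VL checkpoints through the existing converter. Below is a minimal standalone sketch of that registry pattern, with names modeled on convert_hf_to_gguf.py rather than copied from it:

```python
# A minimal sketch of the registry pattern behind @Model.register, assuming
# names modeled on llama.cpp's convert_hf_to_gguf.py (not copied from it).
from __future__ import annotations


class Model:
    # Maps HF architecture strings to the converter subclass that handles them.
    _model_classes: dict[str, type[Model]] = {}

    @classmethod
    def register(cls, *names: str):
        # Decorator: record the subclass under every given architecture name.
        def wrapper(model_cls: type[Model]) -> type[Model]:
            for name in names:
                cls._model_classes[name] = model_cls
            return model_cls
        return wrapper

    @classmethod
    def from_model_architecture(cls, arch: str) -> type[Model]:
        # Look up the converter for an "architectures" entry from config.json.
        try:
            return cls._model_classes[arch]
        except KeyError:
            raise NotImplementedError(f"Architecture {arch!r} is not supported") from None


@Model.register("Qwen2VLForConditionalGeneration", "Qwen2_5_VLForConditionalGeneration")
class Qwen2VLModel(Model):
    pass


# Both architecture strings resolve to the same converter class.
assert Model.from_model_architecture("Qwen2_5_VLForConditionalGeneration") is Qwen2VLModel
```

The new `PLMModel` class in the second hunk plugs into the same mechanism: registering it under "PLMForCausalLM" lets the converter dispatch PLM checkpoints to it, where `set_gguf_parameters` writes the MLA-style head dimensions (`kv_lora_rank`, `qk_nope_head_dim` + `qk_rope_head_dim`, `v_head_dim`) into the GGUF metadata.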