@@ -4156,23 +4156,27 @@ def modify_tensors(self, data_torch, name, bid):
 class XLMRobertaModel(BertModel):
     model_arch = gguf.MODEL_ARCH.BERT
     _lora_files = {}
+    _lora_names = []

     def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path, **kwargs: Any):
         hparams = kwargs.pop("hparams", None)
         if hparams is None:
             hparams = ModelBase.load_hparams(dir_model)

         if lora_names := hparams.get("lora_adaptations"):
+            self._lora_names = lora_names
             self.model_arch = gguf.MODEL_ARCH.JINA_BERT_V3

         super().__init__(dir_model, ftype, fname_out, hparams=hparams, **kwargs)
+        self._xlmroberta_tokenizer_init()

-        if lora_names:
-            for name in lora_names:
+    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
+        if self._lora_names:
+            for name in self._lora_names:
                 fname = self.add_prefix_to_filename(self.fname_out, f"lora-{name}-")
                 self._lora_files[name] = gguf.GGUFWriter(fname, arch=gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess, use_temp_file=self.use_temp_file, dry_run=self.dry_run)

-        self._xlmroberta_tokenizer_init()
+        return super().generate_extra_tensors()

     def set_type(self):
         for lora_writer in self._lora_files.values():
0 commit comments