@@ -8305,15 +8305,18 @@ class LFM2VLModel(MmprojModel):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
         assert self.hparams_vision is not None
+        # TODO(tarek): for dynamic resolution image_size is not specified, setting here for compatibility
         self.hparams_vision["image_size"] = 256
 
     def set_gguf_parameters(self):
         super().set_gguf_parameters()
         self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.LFM2)
-        self.gguf_writer.add_vision_attention_layernorm_eps(self.hparams.get("layer_norm_eps", 1e-6))
-        self.gguf_writer.add_vision_projector_scale_factor(self.global_config.get("downsample_factor", 2))
+        self.gguf_writer.add_vision_attention_layernorm_eps(self.find_vparam(["layer_norm_eps"]))
+        self.gguf_writer.add_vision_projector_scale_factor(self.global_config.get("downsample_factor"))
         self.gguf_writer.add_vision_use_gelu(True)
-        self.gguf_writer.add_vision_block_count(self.find_vparam(self.n_block_keys) - 1)
+        # python notation, e.g. for vision_feature_layer == -1, we pick last layer -> vision_feature_layers_to_drop = 0
+        vision_feature_layers_to_drop = -(self.global_config.get("vision_feature_layer") + 1)
+        self.gguf_writer.add_vision_block_count(self.find_vparam(self.n_block_keys) - vision_feature_layers_to_drop)
 
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
         del bid  # unused
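For context, a minimal standalone sketch of the index arithmetic behind the new `vision_feature_layers_to_drop` lines; the `blocks_to_keep` helper and the block count of 27 are illustrative only, not part of the patch:

```python
# Sketch only: mirrors the arithmetic in set_gguf_parameters() above.
# With Python-style negative indexing, vision_feature_layer == -1 selects the
# last encoder block (drop 0 trailing blocks), -2 selects the second to last
# (drop 1), and so on.
def blocks_to_keep(total_blocks: int, vision_feature_layer: int) -> int:
    layers_to_drop = -(vision_feature_layer + 1)
    return total_blocks - layers_to_drop

assert blocks_to_keep(27, -1) == 27  # keep every block
assert blocks_to_keep(27, -2) == 26  # drop the last block
```

The written `vision.block_count` therefore shrinks as `vision_feature_layer` moves further away from -1.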