@@ -2579,15 +2579,20 @@ def set_gguf_parameters(self):
25792579 elif self .global_config ['model_type' ] == 'qwen2_5_vl' :
25802580 self .gguf_writer .add_vision_projector_type (gguf .VisionProjectorType .QWEN25VL )
25812581 self .gguf_writer .add_vision_use_silu (True )
2582- # find n_wa_pattern (window attention pattern)
2583- fullatt_block_indexes = hparams .get ("fullatt_block_indexes" )
2584- assert fullatt_block_indexes is not None , "fullatt_block_indexes is required for qwen2_5_vl"
2585- n_wa_pattern = fullatt_block_indexes [0 ] + 1
2586- # validate n_wa_pattern
2587- for i in range (1 , len (fullatt_block_indexes )):
2588- if fullatt_block_indexes [i ] - fullatt_block_indexes [i - 1 ] != n_wa_pattern :
2589- raise ValueError (f"Invalid fullatt_block_indexes: { fullatt_block_indexes } " )
2590- self .gguf_writer .add_vision_n_wa_pattern (n_wa_pattern )
2582+ out_hidden_size = hparams .get ("out_hidden_size" )
2583+ if out_hidden_size == 5120 :
2584+ # 32B model does not have n_wa_pattern, the other models do
2585+ self .gguf_writer .add_vision_n_wa_pattern (0 )
2586+ else :
2587+ # find n_wa_pattern (window attention pattern)
2588+ fullatt_block_indexes = hparams .get ("fullatt_block_indexes" )
2589+ assert fullatt_block_indexes is not None , "fullatt_block_indexes is required for qwen2_5_vl"
2590+ n_wa_pattern = fullatt_block_indexes [0 ] + 1
2591+ # validate n_wa_pattern
2592+ for i in range (1 , len (fullatt_block_indexes )):
2593+ if fullatt_block_indexes [i ] - fullatt_block_indexes [i - 1 ] != n_wa_pattern :
2594+ raise ValueError (f"Invalid fullatt_block_indexes: { fullatt_block_indexes } " )
2595+ self .gguf_writer .add_vision_n_wa_pattern (n_wa_pattern )
25912596 else :
25922597 raise ValueError (f"Unknown QwenVL model type: { self .global_config ['model_type' ]} " )
25932598 # default values below are taken from HF transformers code
0 commit comments