@@ -22,9 +22,9 @@ class GraniteGPTQForCausalLM(BaseGPTQModel):
2222 """Enable Granite for GPTQ."""
2323
2424 layer_type = "GraniteDecoderLayer"
25- layers_block_name = "model.layers"
26- outside_layer_modules = ["model.embed_tokens" , "model.norm" ]
27- inside_layer_modules = [
25+ layers_node = "model.layers"
26+ base_modules = ["model.embed_tokens" , "model.norm" ]
27+ layer_modules = [
2828 ["self_attn.k_proj" , "self_attn.v_proj" , "self_attn.q_proj" ],
2929 ["self_attn.o_proj" ],
3030 ["mlp.up_proj" , "mlp.gate_proj" ],
@@ -36,9 +36,9 @@ class GraniteMoeGPTQForCausalLM(BaseGPTQModel):
3636 """Enable Granite MOE for GPTQ."""
3737
3838 layer_type = "GraniteMoeDecoderLayer"
39- layers_block_name = "model.layers"
40- outside_layer_modules = ["model.embed_tokens" , "model.norm" ]
41- inside_layer_modules = [
39+ layers_node = "model.layers"
40+ base_modules = ["model.embed_tokens" , "model.norm" ]
41+ layer_modules = [
4242 ["self_attn.k_proj" , "self_attn.v_proj" , "self_attn.q_proj" ],
4343 ["self_attn.o_proj" ],
4444 ["block_sparse_moe.input_linear" , "block_sparse_moe.output_linear" ],