1 parent 781eaa1 commit 732d8ea
lightllm/models/mixtral/layer_weights/transformer_layer_weight.py
@@ -75,12 +75,10 @@ def _init_moe(self):
                 e_score_correction_bias_name="",
                 weight_prefix=f"model.layers.{self.layer_num_}.block_sparse_moe.experts",
                 n_routed_experts=self.n_routed_experts,
-                split_inter_size=split_inter_size,
                 data_type=self.data_type_,
                 network_config=self.network_config_,
                 layer_num=self.layer_num_,
                 quant_cfg=self.quant_cfg,
-                num_fused_shared_experts=0,
             )
         else:
             raise ValueError(f"Unsupported moe mode: {moe_mode}")