
Commit 732d8ea

fix
1 parent 781eaa1 commit 732d8ea

File tree

1 file changed: 0 additions, 2 deletions

lightllm/models/mixtral/layer_weights/transformer_layer_weight.py

Lines changed: 0 additions & 2 deletions
@@ -75,12 +75,10 @@ def _init_moe(self):
                 e_score_correction_bias_name="",
                 weight_prefix=f"model.layers.{self.layer_num_}.block_sparse_moe.experts",
                 n_routed_experts=self.n_routed_experts,
-                split_inter_size=split_inter_size,
                 data_type=self.data_type_,
                 network_config=self.network_config_,
                 layer_num=self.layer_num_,
                 quant_cfg=self.quant_cfg,
-                num_fused_shared_experts=0,
             )
         else:
             raise ValueError(f"Unsupported moe mode: {moe_mode}")

0 commit comments
