
Commit 0b530f7

fix
1 parent 732d8ea commit 0b530f7

File tree

1 file changed: +1 -14 lines changed


lightllm/models/mixtral/layer_weights/transformer_layer_weight.py

Lines changed: 1 addition & 14 deletions
@@ -50,7 +50,7 @@ def _init_moe(self):
         )

         moe_mode = os.getenv("MOE_MODE", "TP")
-        assert moe_mode in ["EP", "TP"]
+        assert moe_mode in ["TP"], f"Unsupported moe mode: {moe_mode}"

         if moe_mode == "TP":
             self.experts = FusedMoeWeightTP(
@@ -67,18 +67,5 @@ def _init_moe(self):
                 quant_cfg=self.quant_cfg,
                 num_fused_shared_experts=0,
             )
-        elif moe_mode == "EP":
-            self.experts = FusedMoeWeightEP(
-                gate_proj_name="w1",
-                down_proj_name="w2",
-                up_proj_name="w3",
-                e_score_correction_bias_name="",
-                weight_prefix=f"model.layers.{self.layer_num_}.block_sparse_moe.experts",
-                n_routed_experts=self.n_routed_experts,
-                data_type=self.data_type_,
-                network_config=self.network_config_,
-                layer_num=self.layer_num_,
-                quant_cfg=self.quant_cfg,
-            )
         else:
             raise ValueError(f"Unsupported moe mode: {moe_mode}")
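
For context, after this commit Mixtral's `_init_moe` only accepts the tensor-parallel expert path. Below is a minimal standalone sketch of the guard the diff leaves in place; the `_select_moe_mode` helper name is hypothetical (the real code runs this check inline inside `_init_moe`), only the env-var lookup and assertion are taken from the hunk above.

import os

def _select_moe_mode() -> str:
    # Hypothetical helper mirroring the guard this commit leaves in place:
    # MOE_MODE defaults to "TP", and any other value (including the
    # previously accepted "EP") now fails the assertion.
    moe_mode = os.getenv("MOE_MODE", "TP")
    assert moe_mode in ["TP"], f"Unsupported moe mode: {moe_mode}"
    return moe_mode

# e.g. launching with MOE_MODE=EP would now raise
# AssertionError: Unsupported moe mode: EP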
