
Commit 918aa00

Commit message: fix
1 parent 0539dbd · commit 918aa00

File tree: 1 file changed (+1, −3 lines)


lightllm/common/basemodel/layer_weights/meta_weights/fused_moe_weight.py

Lines changed: 1 addition & 3 deletions
@@ -82,8 +82,7 @@ def experts(self, input_tensor, router_logits, top_k, renormalize, use_grouped_t
         w2, w2_scale = self.w2
         use_fp8_w8a8 = self.quant_method is not None

-        # from lightllm.common.fused_moe.grouped_fused_moe import fused_experts_impl
-        from vllm.model_executor.layers.fused_moe.fused_moe import fused_experts_impl
+        from lightllm.common.fused_moe.grouped_fused_moe import fused_experts_impl

         fused_experts_impl(
             hidden_states=input_tensor,
@@ -95,7 +94,6 @@ def experts(self, input_tensor, router_logits, top_k, renormalize, use_grouped_t
             use_fp8_w8a8=use_fp8_w8a8,
             w1_scale=w1_scale,
             w2_scale=w2_scale,
-            block_shape=[128, 128],
         )
         return

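In short, the commit switches the expert execution back from vllm's fused_experts_impl to lightllm's own grouped_fused_moe kernel and drops the block_shape=[128, 128] keyword, which the vllm path apparently used for block-wise fp8 quantization. Below is a minimal sketch of the resulting call site inside the experts() method; only the keyword arguments visible in the diff are taken as given, and the helper name call_fused_experts, its parameter list, and the **elided_kwargs placeholder for the routing arguments elided between the two hunks are assumptions for illustration, not the file's actual contents.

    # Hypothetical helper, not part of the file: it re-enacts only the
    # portion of the experts() call site that is visible in this diff.
    from lightllm.common.fused_moe.grouped_fused_moe import fused_experts_impl

    def call_fused_experts(input_tensor, w1_scale, w2_scale, quant_method, **elided_kwargs):
        # Mirrors the check in the diff: fp8 w8a8 is enabled iff a quant
        # method is configured on the weight object.
        use_fp8_w8a8 = quant_method is not None
        fused_experts_impl(
            hidden_states=input_tensor,
            use_fp8_w8a8=use_fp8_w8a8,
            w1_scale=w1_scale,
            w2_scale=w2_scale,
            # block_shape=[128, 128] was removed by this commit; the
            # lightllm kernel is now called without it.
            **elided_kwargs,  # routing/weight arguments elided between the hunks
        )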
