Commit a3164ac

[v0.11.0][Bugfix][MoE] enable force_load_balance in aclgraph (#4367)
### What this PR does / why we need it?
Enable force_load_balance in aclgraph, solving OOM issues.

pick from #4366

### Does this PR introduce _any_ user-facing change?
No

### How was this patch tested?
e2e & ut

Signed-off-by: Pr0Wh1teGivee <[email protected]>
1 parent 75452ab commit a3164ac

File tree: 1 file changed, +2 -15 lines


vllm_ascend/ops/common_fused_moe.py

Lines changed: 2 additions & 15 deletions

```diff
@@ -19,7 +19,7 @@
 
 import torch
 import torch_npu
-from vllm.config import CompilationLevel, get_current_vllm_config
+from vllm.config import get_current_vllm_config
 from vllm.distributed import (get_dp_group, get_ep_group, get_tp_group,
                               tensor_model_parallel_all_reduce)
 from vllm.forward_context import get_forward_context
@@ -51,20 +51,7 @@ class AscendUnquantizedFusedMoEMethod(UnquantizedFusedMoEMethod):
     def __init__(self, moe: FusedMoEConfig = None):
 
         super().__init__(moe=moe)
-
-        # NOTE: Currently, this self.use_aclgraph is only used in
-        # UnquantizedFusedMoEMethod.forward_oot to decide whether to use in
-        # ops/fused_moe.py:568 to circumvent torch.randint_like not supported issue.
-        # Once torch.randint_like is supported or removed, this flag can be removed.
-        vllm_config = get_current_vllm_config()
-        ascend_config = get_ascend_config()
         self.dynamic_eplb = get_ascend_config().dynamic_eplb
-        if ascend_config.torchair_graph_config.enabled:
-            self.use_aclgraph = False
-        else:
-            self.use_aclgraph = (vllm_config.compilation_config.level
-                                 == CompilationLevel.PIECEWISE and
-                                 not vllm_config.model_config.enforce_eager)
         self.transpose = True
 
     def process_weights_after_loading(self, layer):
@@ -133,7 +120,7 @@ def apply(self,
         # this is a naive implementation for experts load balance so as
         # to avoid accumulating too much tokens on a single rank.
         # currently it is only activated when doing profile runs.
-        if enable_force_load_balance and not self.use_aclgraph:
+        if enable_force_load_balance:
             topk_ids = torch.randint_like(topk_ids, 0, global_num_experts)
 
         moe_comm_method = get_forward_context().moe_comm_method
```
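For context: the removed `not self.use_aclgraph` guard used to skip this randomization when running under aclgraph; after this commit the balancing applies there as well. Below is a minimal, self-contained sketch of the trick the changed line relies on. The helper name `maybe_force_load_balance` and the toy shapes are illustrative only, not part of vllm-ascend:

```python
import torch


def maybe_force_load_balance(topk_ids: torch.Tensor,
                             global_num_experts: int,
                             enable_force_load_balance: bool) -> torch.Tensor:
    """Sketch of the naive expert load balancing from the diff.

    During profile runs, the router's chosen experts are discarded and each
    token is instead assigned uniformly random experts, so no single rank
    accumulates a disproportionate share of tokens (and therefore memory).
    """
    if enable_force_load_balance:
        # Same call as in the patched code: uniform ids in
        # [0, global_num_experts) with the shape/dtype of topk_ids.
        topk_ids = torch.randint_like(topk_ids, 0, global_num_experts)
    return topk_ids


# Illustrative usage: 8 tokens routed top-2 over 16 experts.
router_topk_ids = torch.randint(0, 16, (8, 2))
balanced_ids = maybe_force_load_balance(router_topk_ids, 16, True)
print(balanced_ids.shape)  # torch.Size([8, 2])
```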
