
Commit a77fb2c

lint
Signed-off-by: Bill Nell <[email protected]>
1 parent 73226b9 commit a77fb2c

File tree

1 file changed (+7, -4)


tests/kernels/moe/test_pplx_moe.py

Lines changed: 7 additions & 4 deletions
@@ -28,7 +28,7 @@
 from vllm.config import VllmConfig, set_current_vllm_config
 from vllm.model_executor.layers.activation import SiluAndMul
 from vllm.model_executor.layers.fused_moe.fused_batched_moe import (
-    BatchedExperts, BatchedTritonExperts)
+    BatchedExperts)
 from vllm.model_executor.layers.fused_moe.fused_moe import fused_topk
 from vllm.model_executor.layers.fused_moe.modular_kernel import (
     FusedMoEModularKernel)
@@ -390,9 +390,11 @@ def _pplx_dispatch_combine(
     a_rep = torch.repeat_interleave(a, topk, dim=0).to(device)
 
     torch_output = (a_rep.view(-1, topk, k) * 1.5 *
-                    topk_weight.view(-1, topk, 1).to(device)).sum(dim=1).to(a.dtype)
+                    topk_weight.view(-1, topk, 1).to(device)).sum(dim=1).to(
+                        a.dtype)
 
-    pplx_output = pplx_dispatch_combine(pgi, dp_size, a, topk_weight, topk_ids, num_experts)
+    pplx_output = pplx_dispatch_combine(pgi, dp_size, a, topk_weight, topk_ids,
+                                        num_experts)
 
     torch_output = chunk_by_rank(torch_output, pgi.rank,
                                  pgi.world_size).to(pplx_output.device)
@@ -426,7 +428,8 @@ def test_pplx_dispatch_combine(
     a = torch.randn((m, k), device=device, dtype=dtype) / 10
     score = torch.randn((m, e), device=device, dtype=dtype)
 
-    parallel_launch(world_size, _pplx_dispatch_combine, dp_size, a, score, topk, e)
+    parallel_launch(world_size, _pplx_dispatch_combine, dp_size, a, score,
+                    topk, e)
 
 
 def pplx_moe(pgi, dp_size, a, w1, w2, topk_weight, topk_ids):
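
For context, the re-wrapped lines in _pplx_dispatch_combine build the reference tensor that the pplx dispatch/combine path is checked against: each token is repeated once per selected expert, scaled by the constant 1.5 factor used in the test, weighted by its top-k routing weights, and summed back to one row per token. The following is a minimal, self-contained sketch of that computation with toy shapes; the sizes and values are illustrative only and not part of the commit:

    import torch

    # Toy sizes: m tokens, hidden size k, topk selected experts per token.
    m, k, topk = 4, 8, 2
    a = torch.randn(m, k)
    topk_weight = torch.rand(m, topk)

    # Repeat each token once per selected expert, apply the 1.5 scaling used
    # in the test, weight by the routing weights, and reduce over the topk
    # dimension to recover one output row per token.
    a_rep = torch.repeat_interleave(a, topk, dim=0)
    torch_output = (a_rep.view(-1, topk, k) * 1.5 *
                    topk_weight.view(-1, topk, 1)).sum(dim=1).to(a.dtype)
    assert torch_output.shape == (m, k)

In the test itself this reference tensor is then sliced per rank with chunk_by_rank and compared against the output of the distributed pplx_dispatch_combine call.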
