Skip to content

Commit 93dd74f

Browse files
committed
lint
Signed-off-by: Bill Nell <[email protected]>
1 parent ca2ff26 commit 93dd74f

File tree

2 files changed

+5
-5
lines changed

2 files changed

+5
-5
lines changed

vllm/model_executor/layers/fused_moe/cutlass_moe.py

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -7,12 +7,9 @@
77

88
import vllm.model_executor.layers.fused_moe.modular_kernel as mk
99
from vllm import _custom_ops as ops
10-
import vllm.model_executor.layers.fused_moe.modular_kernel as mk
1110
from vllm.model_executor.layers.fused_moe.dispatch_combine import (
12-
StandardDispatchCombine
13-
)
14-
from vllm.model_executor.layers.fused_moe.utils import (_resize_cache,
15-
_fp8_perm)
11+
StandardDispatchCombine)
12+
from vllm.model_executor.layers.fused_moe.utils import _fp8_perm, _resize_cache
1613
from vllm.scalar_type import scalar_types
1714

1815

vllm/model_executor/layers/fused_moe/fused_batched_moe.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -390,6 +390,7 @@ class BatchedDispatchCombine(mk.FusedMoEQuantizeDispatchCombine):
390390
expert batched format, i.e. E x max_num_tokens x K. This is the format
391391
that the PPLX dispatch/combine kernels use.
392392
"""
393+
393394
def __init__(self, max_num_tokens: Optional[int], world_size: int,
394395
dp_size: int, rank: int):
395396
super().__init__()
@@ -487,6 +488,7 @@ class BatchedExperts(mk.FusedMoEPermuteExpertsUnpermute):
487488
i.e. E x max_num_tokens x K. This is the format that the pplx
488489
dispatch/combine kernels use.
489490
"""
491+
490492
def __init__(
491493
self,
492494
world_size: int,
@@ -593,6 +595,7 @@ class BatchedTritonExperts(mk.FusedMoEPermuteExpertsUnpermute):
593595
i.e. E x max_num_tokens x K. This is the format that the pplx
594596
dispatch/combine kernels use.
595597
"""
598+
596599
def __init__(
597600
self,
598601
max_num_tokens: Optional[int] = None,

0 commit comments

Comments (0)