
Commit 7e2ac48

isort
1 parent 87acddd commit 7e2ac48

3 files changed: 4 additions (+4), 4 deletions (−4)

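The two hunks in the benchmark and test files below are pure isort fixes: within each from-import, the imported names are reordered alphabetically (is_hip before seed_everything). As a minimal sketch, assuming isort 5's public isort.code API, the same reordering can be reproduced directly:

    import isort

    # isort sorts the names inside a from-import alphabetically by default.
    before = "from vllm.utils import seed_everything, is_hip\n"
    print(isort.code(before), end="")
    # from vllm.utils import is_hip, seed_everything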

benchmarks/kernels/benchmark_paged_attention.py

Lines changed: 1 addition & 1 deletion
@@ -6,7 +6,7 @@
 
 from vllm import _custom_ops as ops
 from vllm.utils import (STR_DTYPE_TO_TORCH_DTYPE, FlexibleArgumentParser,
-                        create_kv_caches_with_random, seed_everything, is_hip)
+                        create_kv_caches_with_random, is_hip, seed_everything)
 
 NUM_BLOCKS = 1024 * 1024
 PARTITION_SIZE = 512

tests/kernels/test_moe.py

Lines changed: 1 addition & 1 deletion
@@ -21,7 +21,7 @@
     marlin_quantize)
 from vllm.model_executor.models.mixtral import MixtralMoE
 from vllm.scalar_type import scalar_types
-from vllm.utils import seed_everything, is_hip
+from vllm.utils import is_hip, seed_everything
 
 
 def torch_moe(a, w1, w2, score, topk):

vllm/config.py

Lines changed: 2 additions & 2 deletions
@@ -905,8 +905,8 @@ def _verify_args(self) -> None:
         if self.use_ray:
             from vllm.executor import ray_utils
             ray_utils.assert_ray_available()
-        if (not self.disable_custom_all_reduce and self.world_size > 1 and
-                self.pipeline_parallel_size > 1):
+        if (not self.disable_custom_all_reduce and self.world_size > 1
+                and self.pipeline_parallel_size > 1):
             self.disable_custom_all_reduce = True
             logger.info(
                 "Disabled the custom all-reduce kernel because it is not "
