Skip to content

Commit 471ddb9

Browse files
[XPU] Remove distributed_executor_backend check (#30760)
Signed-off-by: sihao.li <[email protected]>
Co-authored-by: Kunshang Ji <[email protected]>
1 parent bb24592 commit 471ddb9

File tree

1 file changed

+0
-27
lines changed

1 file changed

+0
-27
lines changed

vllm/platforms/xpu.py

Lines changed: 0 additions & 27 deletions
Original file line number | Diff line number | Diff line change
@@ -7,7 +7,6 @@
77

88
import torch
99

10-
import vllm.envs as envs
1110
from vllm.attention.backends.registry import AttentionBackendEnum
1211
from vllm.logger import init_logger
1312

@@ -168,32 +167,6 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None:
168167
if vllm_config.kv_transfer_config is not None:
169168
vllm_config.kv_transfer_config.enable_permute_local_kv = True
170169

171-
if parallel_config.distributed_executor_backend is None:
172-
if parallel_config.world_size > 1:
173-
parallel_config.distributed_executor_backend = "ray"
174-
else:
175-
parallel_config.distributed_executor_backend = "uni"
176-
elif parallel_config.distributed_executor_backend == "mp":
177-
# FIXME(kunshang):
178-
# spawn needs calling `if __name__ == '__main__':`
179-
# fork is not supported for xpu start new process.
180-
if envs.VLLM_WORKER_MULTIPROC_METHOD != "spawn":
181-
os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn"
182-
logger.warning(
183-
"Please use spawn as start method if you want to use mp."
184-
)
185-
elif (
186-
parallel_config.distributed_executor_backend != "ray"
187-
and parallel_config.distributed_executor_backend != "uni"
188-
and parallel_config.distributed_executor_backend != "external_launcher"
189-
):
190-
logger.warning(
191-
"%s is not supported on XPU, fallback to ray distributed"
192-
" executor backend.",
193-
parallel_config.distributed_executor_backend,
194-
)
195-
parallel_config.distributed_executor_backend = "ray"
196-
197170
if model_config and model_config.use_mla:
198171
logger.info(
199172
"MLA is enabled on a non-GPU platform; forcing chunked "

0 commit comments

Comments (0)