We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 8d0a01a · commit 25d585a — Copy full SHA for 25d585a
vllm/v1/worker/xpu_worker.py
@@ -7,6 +7,7 @@
7
8
import vllm.envs as envs
9
from vllm.config import VllmConfig
10
+from vllm.distributed import get_world_group
11
from vllm.logger import init_logger
12
from vllm.model_executor import set_random_seed
13
from vllm.platforms import current_platform
@@ -155,7 +156,8 @@ def init_device(self):
155
156
current_platform.dist_backend)
157
158
# global all_reduce needed for overall oneccl warm up
- torch.distributed.all_reduce(torch.zeros(1).xpu())
159
+ torch.distributed.all_reduce(torch.zeros(1).xpu(),
160
+ group=get_world_group().device_group)
161
162
# Set random seed.
163
set_random_seed(self.model_config.seed)
0 commit comments