1 parent bbe9bd9 commit 105a40f
vllm/model_executor/parallel_utils/custom_all_reduce.py
@@ -29,6 +29,10 @@ def init_custom_ar() -> None:
         return
     rank = get_tensor_model_parallel_rank()
     world_size = get_tensor_model_parallel_world_size()
+    if world_size == 1:
+        # No need to initialize custom allreduce for single GPU case.
+        return
+
     if world_size not in _SUPPORTED_WORLD_SIZES:
         logger.warn(
             "Custom allreduce is disabled due to an unsupported world size: "