Commit de5e1a8 ("reformat")
1 parent: e87821c
File tree: 2 files changed, +4 -2 lines

lightllm/distributed/communication_op.py (3 additions, 0 deletions)
@@ -38,6 +38,7 @@
 vllm_reduce = None
 logger = init_logger(__name__)
 
+
 @contextmanager
 def lightllm_capture_graph():
     if vllm_reduce is not None:
@@ -47,6 +48,7 @@ def lightllm_capture_graph():
         yield
     pass
 
+
 def _all_reduce(input_, op=ReduceOp.SUM, group=None, async_op=False):
     if op != ReduceOp.SUM or async_op:
         original_all_reduce(input_, op, group, async_op)
@@ -58,6 +60,7 @@ def _all_reduce(input_, op=ReduceOp.SUM, group=None, async_op=False):
         return
     original_all_reduce(input_, op, group, async_op)
 
+
 def set_custom_reduce():
     global vllm_reduce
     global device_group
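
All three added lines in this file are blank lines, bringing the module in line with PEP 8's rule of two blank lines between top-level definitions; no behavior changes. For orientation, below is a minimal sketch of the dispatch pattern these hunks touch. The diff shows only the function heads and the fallback calls, so the vllm_reduce.capture() and vllm_reduce.custom_all_reduce() calls here are illustrative assumptions, not lightllm's confirmed internals.

# Sketch only: calls marked "assumed" are not confirmed by the diff above.
from contextlib import contextmanager

import torch.distributed as dist
from torch.distributed import ReduceOp

original_all_reduce = dist.all_reduce  # handle to the stock collective
vllm_reduce = None  # populated by set_custom_reduce() when usable


@contextmanager
def lightllm_capture_graph():
    # Route CUDA-graph capture through the custom backend when present.
    if vllm_reduce is not None:
        with vllm_reduce.capture():  # assumed context-manager API
            yield
    else:
        yield


def _all_reduce(input_, op=ReduceOp.SUM, group=None, async_op=False):
    # The custom kernel covers only synchronous SUM; anything else falls
    # back to the original torch.distributed all_reduce.
    if op != ReduceOp.SUM or async_op:
        original_all_reduce(input_, op, group, async_op)
        return
    if vllm_reduce is not None:
        out = vllm_reduce.custom_all_reduce(input_)  # assumed method
        if out is not None:
            input_.copy_(out)
            return
    original_all_reduce(input_, op, group, async_op)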

lightllm/distributed/custom_all_reduce.py (1 addition, 2 deletions)
@@ -31,10 +31,10 @@
 from vllm.platforms import current_platform
 from vllm.utils import cuda_device_count_stateless
 from lightllm.common.basemodel.layer_infer.cache_tensor_manager import g_cache_manager
+
 ops.meta_size()
 custom_ar = True
 
-
 logger = init_logger(__name__)
 
 
@@ -67,7 +67,6 @@ def __init__(self, group: ProcessGroup, device: Union[int, str, torch.device], m
             # disable because of missing custom allreduce library
             # e.g. in a non-cuda environment
             return
-
         self.group = group
         assert dist.get_backend(group) != dist.Backend.NCCL, "CustomAllreduce should be attached to a non-NCCL group."
 