Skip to content

Commit ce4d3eb

Browse files
committed
reformat
1 parent 5d13dc8 commit ce4d3eb

File tree

3 files changed

+5
-4
lines changed

3 files changed

+5
-4
lines changed

lightllm/server/router/manager.py

Lines changed: 3 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -267,7 +267,9 @@ def get_new_batch():
267267
if self.nnodes > 1 and self.args.dp == 1:
268268
# 使用 all_reduce 获取最小值
269269
limit_router_queue_length = len(self.req_queue.waiting_req_list)
270-
limit_router_queue_length_tensor = torch.tensor(limit_router_queue_length, dtype=torch.int32, device="cpu")
270+
limit_router_queue_length_tensor = torch.tensor(
271+
limit_router_queue_length, dtype=torch.int32, device="cpu"
272+
)
271273
dist.all_reduce(limit_router_queue_length_tensor, op=dist.ReduceOp.MIN, group=self.mulitnode_group)
272274
limit_router_queue_length = limit_router_queue_length_tensor.item()
273275

lightllm/server/router/model_infer/mode_backend/base_backend.py

Lines changed: 0 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -297,4 +297,3 @@ def init_rank_infos(self):
297297
else:
298298
self.is_master_in_dp = False
299299
return
300-

lightllm/server/router/req_queue/chunked_prefill/impl.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -74,12 +74,12 @@ def generate_new_batch(self, current_batch: Batch, limit_router_queue_length: in
7474
can_run_list = []
7575
abort_req_list = []
7676
aborted_count = 0
77-
77+
7878
if limit_router_queue_length is None:
7979
waiting_queue = self.waiting_req_list
8080
else:
8181
waiting_queue = self.waiting_req_list[:limit_router_queue_length]
82-
82+
8383
for req in waiting_queue:
8484
if req.is_aborted and not req.is_paused:
8585
# 由于管理的复杂性,只有没有被调度运行过的请求可以因为abort直接在队列中忽略掉.

0 commit comments

Comments (0)