Skip to content

Commit ca4e4ab

Browse files
Revert "[BugFix] fix ep (#3290)" (#3317)
This reverts commit 86ff68b.
1 parent c000cff commit ca4e4ab

File tree

2 files changed

+3
-4
lines changed

2 files changed

+3
-4
lines changed

fastdeploy/worker/gpu_model_runner.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -780,7 +780,7 @@ def _prepare_inputs(self) -> None:
780780
output_padding_offset,
781781
) = pre_process(
782782
self.share_inputs["input_ids"],
783-
getattr(self.share_inputs, "seq_lens_this_time", self.seq_lens_this_time_buffer),
783+
self.share_inputs["seq_lens_this_time"],
784784
self.speculative_decoding,
785785
(self.share_inputs["draft_tokens"] if self.speculative_decoding else None),
786786
self.share_inputs["seq_lens_encoder"],
@@ -864,7 +864,7 @@ def initialize_forward_meta(self):
864864
max_len_tensor_cpu=self.share_inputs["max_len_tensor_cpu"],
865865
seq_lens_encoder=self.share_inputs["seq_lens_encoder"],
866866
seq_lens_decoder=self.share_inputs["seq_lens_decoder"],
867-
seq_lens_this_time=getattr(self.share_inputs, "seq_lens_this_time", self.seq_lens_this_time_buffer),
867+
seq_lens_this_time=self.share_inputs["seq_lens_this_time"],
868868
batch_id_per_token=self.share_inputs["batch_id_per_token"],
869869
cu_seqlens_q=self.share_inputs["cu_seqlens_q"],
870870
cu_seqlens_k=self.share_inputs["cu_seqlens_k"],

fastdeploy/worker/worker_process.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -244,7 +244,7 @@ def event_loop_ep(self) -> None:
244244
"""
245245
while True:
246246
self.worker_healthy_live_signal.value[self.local_rank % self.max_chips_per_node] = int(time.time())
247-
num_running_requests = 0
247+
248248
if self.fd_config.parallel_config.tensor_parallel_rank == 0 and self.task_queue.num_tasks() > 0:
249249
tasks, read_finish = self.task_queue.get_tasks()
250250

@@ -271,7 +271,6 @@ def event_loop_normal(self) -> None:
271271
self.nnode = int((self.parallel_config.tensor_parallel_size + 7) // 8)
272272
mp_num_per_node = self.parallel_config.tensor_parallel_size // self.nnode
273273
req_ids = []
274-
num_running_requests = 0
275274
while True:
276275
if self.local_rank == 0:
277276
if self.model_weights_status.value[0] != 0:

Comments (0)