Commit e0aeac5

[BugFix] num_seqs (#3291)
* fix num_seqs
* merge develop

1 parent b88537a · commit e0aeac5

File tree

3 files changed: +6 additions, -3 deletions


fastdeploy/model_executor/models/ernie4_5_moe.py

Lines changed: 1 addition & 1 deletion
@@ -450,7 +450,7 @@ def empty_input_forward(self):
             self.fd_config.model_config.moe_layer_start_index,
             self.fd_config.model_config.num_hidden_layers,
         ):
-            self.ernie.layers[i].mlp.expert(fake_hidden_states)
+            self.ernie.layers[i].mlp.experts(fake_hidden_states, self.ernie.layers[i].mlp.gate)

     def forward(
         self,
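
For reference, the new call hands the layer's gate to the fused experts module instead of invoking a single expert. A self-contained toy sketch of that call shape, using stand-in classes rather than FastDeploy's real gate/experts modules (which live under fastdeploy/model_executor and are not reproduced here):

# Toy stand-ins illustrating experts(hidden_states, gate).
class ToyGate:
    def __call__(self, hidden_states):
        return [0.0] * len(hidden_states)      # placeholder routing scores

class ToyExperts:
    def __call__(self, hidden_states, gate):
        scores = gate(hidden_states)           # gate is applied inside the fused experts call
        return hidden_states, scores

class ToyMLP:
    def __init__(self):
        self.gate = ToyGate()
        self.experts = ToyExperts()

mlp = ToyMLP()
fake_hidden_states = []                        # empty warm-up input, as in empty_input_forward
out, scores = mlp.experts(fake_hidden_states, mlp.gate)   # mirrors the patched call shape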

fastdeploy/worker/gpu_model_runner.py

Lines changed: 2 additions & 2 deletions
@@ -795,7 +795,7 @@ def _prepare_inputs(self) -> None:
             output_padding_offset,
         ) = pre_process(
             self.share_inputs["input_ids"],
-            self.share_inputs["seq_lens_this_time"],
+            getattr(self.share_inputs, "seq_lens_this_time", self.seq_lens_this_time_buffer),
             self.speculative_decoding,
             (self.share_inputs["draft_tokens"] if self.speculative_decoding else None),
             self.share_inputs["seq_lens_encoder"],
@@ -880,7 +880,7 @@ def initialize_forward_meta(self):
             max_len_tensor_cpu=self.share_inputs["max_len_tensor_cpu"],
             seq_lens_encoder=self.share_inputs["seq_lens_encoder"],
             seq_lens_decoder=self.share_inputs["seq_lens_decoder"],
-            seq_lens_this_time=self.share_inputs["seq_lens_this_time"],
+            seq_lens_this_time=getattr(self.share_inputs, "seq_lens_this_time", self.seq_lens_this_time_buffer),
             batch_id_per_token=self.share_inputs["batch_id_per_token"],
             cu_seqlens_q=self.share_inputs["cu_seqlens_q"],
             cu_seqlens_k=self.share_inputs["cu_seqlens_k"],
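
Both hunks swap a direct lookup for getattr with a fallback buffer. Plain Python getattr semantics apply: if the object has no attribute of that name, the third argument is returned, and dict keys are not attributes. A small standalone illustration (the SeqHolder class and the values are made up for the example):

# Demonstrates getattr-with-default, independent of FastDeploy.
class SeqHolder:
    seq_lens_this_time = [3, 1, 1]                          # present as a real attribute

share_inputs_obj = SeqHolder()
share_inputs_dict = {"seq_lens_this_time": [3, 1, 1]}       # present only as a dict key
seq_lens_this_time_buffer = [0, 0, 0]                       # stand-in for the fallback buffer

# Attribute exists -> its value is returned.
print(getattr(share_inputs_obj, "seq_lens_this_time", seq_lens_this_time_buffer))   # [3, 1, 1]
# A dict key is not an attribute -> the fallback buffer is returned.
print(getattr(share_inputs_dict, "seq_lens_this_time", seq_lens_this_time_buffer))  # [0, 0, 0]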

fastdeploy/worker/worker_process.py

Lines changed: 3 additions & 0 deletions
@@ -244,6 +244,7 @@ def event_loop_ep(self) -> None:
         """
         while True:
             self.worker_healthy_live_signal.value[self.local_rank % self.max_chips_per_node] = int(time.time())
+            num_running_requests = 0

             if self.fd_config.parallel_config.tensor_parallel_rank == 0 and self.task_queue.num_tasks() > 0:
                 tasks, read_finish = self.task_queue.get_tasks()
@@ -271,6 +272,8 @@ def event_loop_normal(self) -> None:
         self.nnode = int((self.parallel_config.tensor_parallel_size + 7) // 8)
         mp_num_per_node = self.parallel_config.tensor_parallel_size // self.nnode
         req_ids = []
+        num_running_requests = 0
+
         while True:
             if self.local_rank == 0:
                 if self.model_weights_status.value[0] != 0:
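
The two added num_running_requests = 0 lines give the event loops a defined value before any task batch assigns one. A minimal reproduction of the failure mode such a default avoids (a toy loop, not the worker's real control flow):

def event_loop(task_batches):
    num_running_requests = 0                  # default, as in the patch
    for tasks in task_batches:
        if tasks:                             # only reassigned when tasks arrive
            num_running_requests = len(tasks)
        # Without the default above, this read would raise UnboundLocalError
        # on iterations before the first non-empty batch.
        print("running:", num_running_requests)

event_loop([[], ["req-0", "req-1"], []])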
