5 changes: 0 additions & 5 deletions tensorrt_llm/_torch/pyexecutor/handle_logits.py

@@ -83,8 +83,3 @@ def __call__(
                 logits_view = logits[logits_begin:logits_end].reshape(
                     1, beam_width, -1)
                 llm_req.py_result.append_generation_logits(logits_view)
-
-        # Finalize any remaining logits transfers for all requests in chunked mode
-        for llm_req in chain(context_requests, generation_requests):
-            if llm_req.py_use_chunked_generation_logits and llm_req.py_return_generation_logits:
-                llm_req.py_result.transfer_remaining_device_logits()
6 changes: 6 additions & 0 deletions tensorrt_llm/_torch/pyexecutor/py_executor.py

@@ -2637,6 +2637,12 @@ def _handle_responses(self):
             if request.return_perf_metrics and request.py_decoding_iter >= 1:
                 request.update_perf_metrics(self.iter_counter)

+            if request.is_finished:
+                # Finalize any remaining logits transfers for the finished request in chunked mode
+                if request.py_use_chunked_generation_logits and request.py_return_generation_logits:
+                    with torch.inference_mode():
+                        request.py_result.transfer_remaining_device_logits()
+
             request_done = False
             if request.py_decoding_iter == 1 or request.is_finished or \
                     request.py_decoding_iter % self.stream_interval == 0:
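For readers unfamiliar with the chunked generation-logits path, the relocated call follows a flush-on-completion pattern: while a request is active, per-step logits are appended to a device-side buffer (as in append_generation_logits above), and whatever has not yet been copied to the host is transferred once the request finishes, under torch.inference_mode(). The sketch below illustrates that pattern in isolation. It is a minimal sketch, not TensorRT-LLM's implementation: DeviceLogitsBuffer, chunk_size, and the tensor shapes are assumptions made for this example; only the method names append_generation_logits and transfer_remaining_device_logits and the use of torch.inference_mode() come from the diff above.

# Minimal, self-contained sketch of a chunked generation-logits buffer.
# DeviceLogitsBuffer and its parameters are hypothetical and exist only for
# illustration; the flush-on-finish idea is what mirrors this PR's change.
import torch


class DeviceLogitsBuffer:
    """Accumulates per-step logits on the device and copies them to host in chunks."""

    def __init__(self, chunk_size: int = 8):
        self.chunk_size = chunk_size
        self._device_chunks: list[torch.Tensor] = []  # steps not yet transferred
        self.host_logits: list[torch.Tensor] = []     # chunks already on the host

    def append_generation_logits(self, logits_view: torch.Tensor) -> None:
        # Keep the new step's logits on the device; transfer only full chunks.
        self._device_chunks.append(logits_view)
        if len(self._device_chunks) >= self.chunk_size:
            self._flush()

    def transfer_remaining_device_logits(self) -> None:
        # Called once when the request finishes: move the final partial chunk.
        self._flush()

    def _flush(self) -> None:
        if not self._device_chunks:
            return
        chunk = torch.cat(self._device_chunks, dim=0)
        self.host_logits.append(chunk.to("cpu", non_blocking=True))
        self._device_chunks.clear()


# Usage: append per decoding step, then flush the remainder once the request
# is finished, mirroring the call added in _handle_responses above.
buf = DeviceLogitsBuffer(chunk_size=4)
for _ in range(10):  # 10 decoding steps -> two full chunks plus a partial one
    buf.append_generation_logits(torch.randn(1, 1, 32000))  # (1, beam_width, vocab_size)
with torch.inference_mode():
    buf.transfer_remaining_device_logits()

Relative to the removed loop in handle_logits.py, which re-checked every context and generation request on each invocation, the new placement in _handle_responses runs the final transfer once per request, at the point where that request is actually finished.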