From 222a96c71585088ed2313d05e168753bad824345 Mon Sep 17 00:00:00 2001
From: kaikai <2837968358@qq.com>
Date: Wed, 12 Mar 2025 14:47:53 +0800
Subject: [PATCH] fix(usage): incorrect usage format in streaming

---
 src/agents/models/openai_chatcompletions.py | 24 +++++++++++++++++-------
 1 file changed, 17 insertions(+), 7 deletions(-)

diff --git a/src/agents/models/openai_chatcompletions.py b/src/agents/models/openai_chatcompletions.py
index a7340d058..903540f5e 100644
--- a/src/agents/models/openai_chatcompletions.py
+++ b/src/agents/models/openai_chatcompletions.py
@@ -405,20 +405,30 @@ async def stream_response(
             for function_call in state.function_calls.values():
                 outputs.append(function_call)
 
-            final_response = response.model_copy(update={"output": outputs, "usage": usage})
+            final_usage = (
+                Usage(
+                    requests=1,
+                    input_tokens=usage.prompt_tokens,
+                    output_tokens=usage.completion_tokens,
+                    total_tokens=usage.total_tokens,
+                )
+                if usage
+                else Usage()
+            )
+            final_response = response.model_copy(update={"output": outputs, "usage": final_usage})
 
             yield ResponseCompletedEvent(
                 response=final_response,
                 type="response.completed",
             )
+
             if tracing.include_data():
-                span_generation.span_data.output = [final_response.model_dump()]
+                span_generation.span_data.output = [output.model_dump() for output in outputs]
 
-            if usage:
-                span_generation.span_data.usage = {
-                    "input_tokens": usage.prompt_tokens,
-                    "output_tokens": usage.completion_tokens,
-                }
+            span_generation.span_data.usage = {
+                "input_tokens": final_usage.input_tokens,
+                "output_tokens": final_usage.output_tokens,
+            }
 
     @overload
     async def _fetch_response(
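
For context, the fix above is essentially a field-name mapping: the Chat
Completions streaming API reports usage as a CompletionUsage with
prompt_tokens and completion_tokens, while the Responses-style object the
SDK yields and traces expects input_tokens and output_tokens. Below is a
minimal sketch of that mapping, under the assumption that Usage is a plain
dataclass with the fields the patch sets; the standalone Usage definition
and the to_usage helper are illustrative stand-ins, not the SDK's actual
code.

    from __future__ import annotations

    from dataclasses import dataclass

    from openai.types import CompletionUsage


    @dataclass
    class Usage:
        # Zero-valued defaults double as the fallback for streams that
        # produced no usage information at all.
        requests: int = 0
        input_tokens: int = 0
        output_tokens: int = 0
        total_tokens: int = 0


    def to_usage(usage: CompletionUsage | None) -> Usage:
        # The final chunk carries usage only when the request set
        # stream_options={"include_usage": True}; otherwise fall back to
        # an empty Usage rather than passing the raw (and differently
        # named) CompletionUsage through.
        if usage is None:
            return Usage()
        return Usage(
            requests=1,
            input_tokens=usage.prompt_tokens,
            output_tokens=usage.completion_tokens,
            total_tokens=usage.total_tokens,
        )

This also explains the second hunk: once final_usage is always a Usage
(never None), the tracing span can read input_tokens and output_tokens
unconditionally instead of guarding on `if usage:`.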