Skip to content

Commit 970a9e4

Browse files
committed
Add better tracing for sync_provider
1 parent a08a0a5 commit 970a9e4

File tree

1 file changed

+182
-19
lines changed

1 file changed

+182
-19
lines changed

src/agentex/lib/adk/providers/_modules/sync_provider.py

Lines changed: 182 additions & 19 deletions
Original file line number | Diff line number | Diff line change
@@ -109,10 +109,82 @@ async def get_response(
109109

110110
response = await self.original_model.get_response(**kwargs)
111111

112-
# Set span output
113-
if span:
112+
# Set span output with structured data
113+
if span and response:
114+
new_items = []
115+
final_output = ""
116+
117+
# Extract final output text first - try multiple sources
118+
if hasattr(response, 'final_output') and response.final_output:
119+
final_output = response.final_output
120+
elif hasattr(response, 'text') and response.text:
121+
final_output = response.text
122+
elif hasattr(response, 'content') and response.content:
123+
final_output = response.content
124+
125+
# Extract items from the response
126+
if hasattr(response, 'new_items') and response.new_items:
127+
for item in response.new_items:
128+
# Handle reasoning items
129+
if hasattr(item, 'type') and item.type == 'reasoning':
130+
reasoning_summary = []
131+
if hasattr(item, 'summary') and item.summary:
132+
for summary_part in item.summary:
133+
if hasattr(summary_part, 'text'):
134+
reasoning_summary.append({
135+
"text": summary_part.text,
136+
"type": "summary_text"
137+
})
138+
139+
new_items.append({
140+
"id": getattr(item, 'id', None),
141+
"type": "reasoning",
142+
"status": getattr(item, 'status', None),
143+
"content": None,
144+
"summary": reasoning_summary if reasoning_summary else None,
145+
})
146+
147+
# Handle tool call items
148+
elif hasattr(item, 'type') and item.type == 'function_call':
149+
new_items.append({
150+
"id": getattr(item, 'id', None),
151+
"name": getattr(item, 'name', None),
152+
"type": "function_call",
153+
"status": getattr(item, 'status', 'completed'),
154+
"call_id": getattr(item, 'call_id', None),
155+
"arguments": getattr(item, 'arguments', None),
156+
})
157+
158+
# Handle tool output items
159+
elif hasattr(item, 'type') and item.type == 'function_call_output':
160+
new_items.append({
161+
"type": "function_call_output",
162+
"output": getattr(item, 'output', None),
163+
"call_id": getattr(item, 'call_id', None),
164+
})
165+
166+
# Handle message items
167+
elif hasattr(item, 'type') and item.type == 'message':
168+
content = []
169+
if hasattr(item, 'content') and item.content:
170+
for content_part in item.content:
171+
if hasattr(content_part, 'text'):
172+
content.append({
173+
"text": content_part.text,
174+
"type": "output_text",
175+
})
176+
177+
new_items.append({
178+
"id": getattr(item, 'id', None),
179+
"role": getattr(item, 'role', 'assistant'),
180+
"type": "message",
181+
"status": getattr(item, 'status', 'completed'),
182+
"content": content,
183+
})
184+
114185
span.output = {
115-
"response": str(response) if response else None,
186+
"new_items": new_items,
187+
"final_output": final_output if final_output else None,
116188
}
117189

118190
return response
@@ -160,7 +232,9 @@ async def stream_response(
160232
# Wrap the streaming in a tracing span if tracer is available
161233
if self.tracer and self.trace_id:
162234
trace = self.tracer.trace(self.trace_id)
163-
async with trace.span(
235+
236+
# Manually start the span instead of using context manager
237+
span = await trace.start_span(
164238
parent_id=self.parent_span_id,
165239
name="run_agent_streamed",
166240
input={
@@ -172,7 +246,9 @@ async def stream_response(
172246
"handoffs": [str(h) for h in handoffs] if handoffs else [],
173247
"previous_response_id": previous_response_id,
174248
},
175-
) as span:
249+
)
250+
251+
try:
176252
# Get the stream from the original model
177253
stream_kwargs = {
178254
"system_instructions": system_instructions,
@@ -193,23 +269,110 @@ async def stream_response(
193269
# Get the stream response from the original model and yield each event
194270
stream_response = self.original_model.stream_response(**stream_kwargs)
195271

196-
# Pass through each event from the original stream
197-
event_count = 0
198-
final_output = None
272+
# Pass through each event from the original stream and track items
273+
new_items = []
274+
final_response_text = ""
275+
current_text_item = None
276+
tool_call_map = {} # Map call_id to tool name
277+
199278
async for event in stream_response:
200-
event_count += 1
201-
# Track the final output if available
202-
if hasattr(event, 'type') and event.type == 'raw_response_event':
203-
if hasattr(event.data, 'output'):
204-
final_output = event.data.output
279+
# Track reasoning, tool calls, and responses from run_item_stream_event
280+
if hasattr(event, 'type') and event.type == 'run_item_stream_event':
281+
if hasattr(event, 'item'):
282+
item = event.item
283+
284+
# Handle reasoning items
285+
if item.type == 'reasoning_item':
286+
reasoning_summary = []
287+
if hasattr(item, 'raw_item') and hasattr(item.raw_item, 'summary'):
288+
for summary_part in item.raw_item.summary:
289+
if hasattr(summary_part, 'text'):
290+
reasoning_summary.append({
291+
"text": summary_part.text,
292+
"type": "summary_text"
293+
})
294+
295+
new_items.append({
296+
"id": getattr(item.raw_item, 'id', None),
297+
"type": "reasoning",
298+
"status": getattr(item.raw_item, 'status', None),
299+
"content": None,
300+
"summary": reasoning_summary if reasoning_summary else None,
301+
})
302+
303+
# Handle tool call items
304+
elif item.type == 'tool_call_item':
305+
call_id, tool_name, tool_arguments = _extract_tool_call_info(item.raw_item)
306+
tool_call_map[call_id] = tool_name
307+
308+
new_items.append({
309+
"id": getattr(item.raw_item, 'id', None),
310+
"name": tool_name,
311+
"type": "function_call",
312+
"status": getattr(item.raw_item, 'status', 'completed'),
313+
"call_id": call_id,
314+
"arguments": str(tool_arguments) if isinstance(tool_arguments, dict) else tool_arguments,
315+
})
316+
317+
# Handle tool output items
318+
elif item.type == 'tool_call_output_item':
319+
call_id, tool_name, content = _extract_tool_response_info(tool_call_map, item.raw_item)
320+
321+
new_items.append({
322+
"type": "function_call_output",
323+
"output": content,
324+
"call_id": call_id,
325+
})
326+
327+
# Accumulate text deltas to build final response
328+
# Note: OpenAI Agents SDK can emit events in different formats
329+
if hasattr(event, 'type') and event.type == 'response.output_text.delta':
330+
# Direct event type from OpenAI Agents SDK (observed in practice)
331+
if hasattr(event, 'delta'):
332+
final_response_text += event.delta
333+
334+
# Handle raw_response_event wrapper (alternative event format, kept for compatibility)
335+
elif hasattr(event, 'type') and event.type == 'raw_response_event':
336+
if hasattr(event, 'data'):
337+
raw_event = event.data
338+
339+
# Track when output items are added
340+
if isinstance(raw_event, ResponseOutputItemAddedEvent):
341+
if hasattr(raw_event, 'item') and raw_event.item.type == 'message':
342+
current_text_item = {
343+
"id": getattr(raw_event.item, 'id', None),
344+
"role": getattr(raw_event.item, 'role', 'assistant'),
345+
"type": "message",
346+
"status": "in_progress",
347+
"content": []
348+
}
349+
350+
# Check if this is a text delta event
351+
elif isinstance(raw_event, ResponseTextDeltaEvent):
352+
if hasattr(raw_event, 'delta') and raw_event.delta:
353+
final_response_text += raw_event.delta
354+
355+
# Track when output items are done
356+
elif isinstance(raw_event, ResponseOutputItemDoneEvent):
357+
if current_text_item and final_response_text:
358+
current_text_item["status"] = "completed"
359+
current_text_item["content"] = [{
360+
"text": final_response_text,
361+
"type": "output_text",
362+
}]
363+
new_items.append(current_text_item)
364+
current_text_item = None
365+
205366
yield event
206367

207-
# Set span output
208-
if span:
209-
span.output = {
210-
"event_count": event_count,
211-
"final_output": str(final_output) if final_output else None,
212-
}
368+
# Set span output with structured data including tool calls and final response
369+
span.output = {
370+
"new_items": new_items,
371+
"final_output": final_response_text if final_response_text else None,
372+
}
373+
finally:
374+
# End the span after all events have been yielded
375+
await trace.end_span(span)
213376
else:
214377
# No tracing, just stream normally
215378
# Get the stream from the original model

0 commit comments

Comments (0)