Commit 0455756

feat: add structured_output_span (#655)
* feat: add structured_output_span
1 parent 49ff226 commit 0455756

File tree

2 files changed: +82 −22 lines


src/strands/agent/agent.py

Lines changed: 43 additions & 22 deletions
@@ -33,7 +33,7 @@
 from ..models.model import Model
 from ..session.session_manager import SessionManager
 from ..telemetry.metrics import EventLoopMetrics
-from ..telemetry.tracer import get_tracer
+from ..telemetry.tracer import get_tracer, serialize
 from ..tools.registry import ToolRegistry
 from ..tools.watcher import ToolWatcher
 from ..types.content import ContentBlock, Message, Messages
@@ -445,27 +445,48 @@ async def structured_output_async(
             ValueError: If no conversation history or prompt is provided.
         """
         self.hooks.invoke_callbacks(BeforeInvocationEvent(agent=self))
-
-        try:
-            if not self.messages and not prompt:
-                raise ValueError("No conversation history or prompt provided")
-
-            # Create temporary messages array if prompt is provided
-            if prompt:
-                content: list[ContentBlock] = [{"text": prompt}] if isinstance(prompt, str) else prompt
-                temp_messages = self.messages + [{"role": "user", "content": content}]
-            else:
-                temp_messages = self.messages
-
-            events = self.model.structured_output(output_model, temp_messages, system_prompt=self.system_prompt)
-            async for event in events:
-                if "callback" in event:
-                    self.callback_handler(**cast(dict, event["callback"]))
-
-            return event["output"]
-
-        finally:
-            self.hooks.invoke_callbacks(AfterInvocationEvent(agent=self))
+        with self.tracer.tracer.start_as_current_span(
+            "execute_structured_output", kind=trace_api.SpanKind.CLIENT
+        ) as structured_output_span:
+            try:
+                if not self.messages and not prompt:
+                    raise ValueError("No conversation history or prompt provided")
+                # Create temporary messages array if prompt is provided
+                if prompt:
+                    content: list[ContentBlock] = [{"text": prompt}] if isinstance(prompt, str) else prompt
+                    temp_messages = self.messages + [{"role": "user", "content": content}]
+                else:
+                    temp_messages = self.messages
+
+                structured_output_span.set_attributes(
+                    {
+                        "gen_ai.system": "strands-agents",
+                        "gen_ai.agent.name": self.name,
+                        "gen_ai.agent.id": self.agent_id,
+                        "gen_ai.operation.name": "execute_structured_output",
+                    }
+                )
+                for message in temp_messages:
+                    structured_output_span.add_event(
+                        f"gen_ai.{message['role']}.message",
+                        attributes={"role": message["role"], "content": serialize(message["content"])},
+                    )
+                if self.system_prompt:
+                    structured_output_span.add_event(
+                        "gen_ai.system.message",
+                        attributes={"role": "system", "content": serialize([{"text": self.system_prompt}])},
+                    )
+                events = self.model.structured_output(output_model, temp_messages, system_prompt=self.system_prompt)
+                async for event in events:
+                    if "callback" in event:
+                        self.callback_handler(**cast(dict, event["callback"]))
+                structured_output_span.add_event(
+                    "gen_ai.choice", attributes={"message": serialize(event["output"].model_dump())}
+                )
+                return event["output"]
+
+            finally:
+                self.hooks.invoke_callbacks(AfterInvocationEvent(agent=self))
 
     async def stream_async(self, prompt: Union[str, list[ContentBlock]], **kwargs: Any) -> AsyncIterator[Any]:
         """Process a natural language prompt and yield events as an async iterator.

tests/strands/agent/test_agent.py

Lines changed: 39 additions & 0 deletions
@@ -980,6 +980,14 @@ def test_agent_callback_handler_custom_handler_used():
 
 
 def test_agent_structured_output(agent, system_prompt, user, agenerator):
+    # Setup mock tracer and span
+    mock_strands_tracer = unittest.mock.MagicMock()
+    mock_otel_tracer = unittest.mock.MagicMock()
+    mock_span = unittest.mock.MagicMock()
+    mock_strands_tracer.tracer = mock_otel_tracer
+    mock_otel_tracer.start_as_current_span.return_value.__enter__.return_value = mock_span
+    agent.tracer = mock_strands_tracer
+
     agent.model.structured_output = unittest.mock.Mock(return_value=agenerator([{"output": user}]))
 
     prompt = "Jane Doe is 30 years old and her email is [email protected]"
@@ -999,8 +1007,34 @@ def test_agent_structured_output(agent, system_prompt, user, agenerator):
         type(user), [{"role": "user", "content": [{"text": prompt}]}], system_prompt=system_prompt
     )
 
+    mock_span.set_attributes.assert_called_once_with(
+        {
+            "gen_ai.system": "strands-agents",
+            "gen_ai.agent.name": "Strands Agents",
+            "gen_ai.agent.id": "default",
+            "gen_ai.operation.name": "execute_structured_output",
+        }
+    )
+
+    mock_span.add_event.assert_any_call(
+        "gen_ai.user.message",
+        attributes={"role": "user", "content": '[{"text": "Jane Doe is 30 years old and her email is [email protected]"}]'},
+    )
+
+    mock_span.add_event.assert_called_with(
+        "gen_ai.choice",
+        attributes={"message": json.dumps(user.model_dump())},
+    )
+
 
 def test_agent_structured_output_multi_modal_input(agent, system_prompt, user, agenerator):
+    # Setup mock tracer and span
+    mock_strands_tracer = unittest.mock.MagicMock()
+    mock_otel_tracer = unittest.mock.MagicMock()
+    mock_span = unittest.mock.MagicMock()
+    mock_strands_tracer.tracer = mock_otel_tracer
+    mock_otel_tracer.start_as_current_span.return_value.__enter__.return_value = mock_span
+    agent.tracer = mock_strands_tracer
     agent.model.structured_output = unittest.mock.Mock(return_value=agenerator([{"output": user}]))
 
     prompt = [
@@ -1030,6 +1064,11 @@ def test_agent_structured_output_multi_modal_input(agent, system_prompt, user, a
         type(user), [{"role": "user", "content": prompt}], system_prompt=system_prompt
     )
 
+    mock_span.add_event.assert_called_with(
+        "gen_ai.choice",
+        attributes={"message": json.dumps(user.model_dump())},
+    )
+
 
 @pytest.mark.asyncio
 async def test_agent_structured_output_in_async_context(agent, user, agenerator):
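
The tests stub the tracer rather than configuring a real exporter: start_as_current_span is used as a context manager, so its __enter__ return value is replaced with a mock span that the assertions then inspect. A small self-contained sketch of that stubbing pattern follows; the test name and payload are made up for illustration.

import json
import unittest.mock


def test_span_records_choice_event():
    # Fake tracer whose context manager yields a mock span, as in the tests above.
    mock_otel_tracer = unittest.mock.MagicMock()
    mock_span = unittest.mock.MagicMock()
    mock_otel_tracer.start_as_current_span.return_value.__enter__.return_value = mock_span

    # Code under test would call the tracer the way the agent does; here we do it inline.
    with mock_otel_tracer.start_as_current_span("execute_structured_output") as span:
        span.add_event("gen_ai.choice", attributes={"message": json.dumps({"name": "Jane Doe"})})

    # The assertion sees the calls because the context manager handed back mock_span.
    mock_span.add_event.assert_called_with(
        "gen_ai.choice", attributes={"message": json.dumps({"name": "Jane Doe"})}
    )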
