Commit 47c5c09
chore(trace): updated semantic conventions with tool mappings
1 parent 729beca

3 files changed: +122, -32 lines

src/strands/telemetry/tracer.py

Lines changed: 72 additions & 12 deletions
@@ -153,6 +153,28 @@ def _set_attributes(self, span: Span, attributes: Dict[str, AttributeValue]) ->
         for key, value in attributes.items():
             span.set_attribute(key, value)
 
+    def _add_optional_usage_and_metrics_attributes(
+        self, attributes: Dict[str, AttributeValue], usage: Usage, metrics: Metrics
+    ) -> None:
+        """Add optional usage and metrics attributes if they have values.
+
+        Args:
+            attributes: Dictionary to add attributes to
+            usage: Token usage information from the model call
+            metrics: Metrics from the model call
+        """
+        if usage.get("cacheReadInputTokens", 0) > 0:
+            attributes["gen_ai.usage.cache_read_input_tokens"] = usage["cacheReadInputTokens"]
+
+        if usage.get("cacheWriteInputTokens", 0) > 0:
+            attributes["gen_ai.usage.cache_write_input_tokens"] = usage["cacheWriteInputTokens"]
+
+        if metrics.get("timeToFirstByteMs", 0) > 0:
+            attributes["gen_ai.server.time_to_first_token"] = metrics["timeToFirstByteMs"]
+
+        if metrics.get("latencyMs", 0) > 0:
+            attributes["gen_ai.server.request.duration"] = metrics["latencyMs"]
+
     def _end_span(
         self,
         span: Span,
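Note: the new helper only records cache and timing attributes when they carry a non-zero value, instead of always emitting zeros. A minimal sketch of the effect, with made-up numbers and an assumed Tracer() instance:

    # Illustrative only: values and the tracer instance are assumptions.
    from strands.telemetry.tracer import Tracer

    tracer = Tracer()
    usage = {"inputTokens": 10, "outputTokens": 20, "totalTokens": 30, "cacheReadInputTokens": 5}
    metrics = {"latencyMs": 20}  # no timeToFirstByteMs reported

    attributes = {}
    tracer._add_optional_usage_and_metrics_attributes(attributes, usage, metrics)
    # attributes == {
    #     "gen_ai.usage.cache_read_input_tokens": 5,
    #     "gen_ai.server.request.duration": 20,
    # }
    # cacheWriteInputTokens and timeToFirstByteMs are omitted rather than recorded as 0.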
@@ -301,12 +323,11 @@ def end_model_invoke_span(
             "gen_ai.usage.completion_tokens": usage["outputTokens"],
             "gen_ai.usage.output_tokens": usage["outputTokens"],
             "gen_ai.usage.total_tokens": usage["totalTokens"],
-            "gen_ai.usage.cache_read_input_tokens": usage.get("cacheReadInputTokens", 0),
-            "gen_ai.usage.cache_write_input_tokens": usage.get("cacheWriteInputTokens", 0),
-            "gen_ai.server.time_to_first_token": metrics.get("timeToFirstByteMs", 0),
-            "gen_ai.server.request.duration": metrics.get("latencyMs", 0),
         }
 
+        # Add optional attributes if they have values
+        self._add_optional_usage_and_metrics_attributes(attributes, usage, metrics)
+
         if self.use_latest_genai_conventions:
             self._add_event(
                 span,
@@ -316,7 +337,7 @@ def end_model_invoke_span(
                     [
                         {
                             "role": message["role"],
-                            "parts": [{"type": "text", "content": message["content"]}],
+                            "parts": self._map_content_blocks_to_otel_parts(message["content"]),
                             "finish_reason": str(stop_reason),
                         }
                     ]
@@ -371,7 +392,7 @@ def start_tool_call_span(self, tool: ToolUse, parent_span: Optional[Span] = None
                             "type": "tool_call",
                             "name": tool["name"],
                             "id": tool["toolUseId"],
-                            "arguments": [{"content": tool["input"]}],
+                            "arguments": tool["input"],
                         }
                     ],
                 }
@@ -426,7 +447,7 @@ def end_tool_call_span(
                         {
                             "type": "tool_call_response",
                             "id": tool_result.get("toolUseId", ""),
-                            "result": tool_result.get("content"),
+                            "response": tool_result.get("content"),
                         }
                     ],
                 }
@@ -513,7 +534,7 @@ def end_event_loop_cycle_span(
                     [
                         {
                             "role": tool_result_message["role"],
-                            "parts": [{"type": "text", "content": tool_result_message["content"]}],
+                            "parts": self._map_content_blocks_to_otel_parts(tool_result_message["content"]),
                         }
                     ]
                 )
@@ -643,19 +664,23 @@ def start_multiagent_span(
         )
 
         span = self._start_span(operation, attributes=attributes, span_kind=trace_api.SpanKind.CLIENT)
-        content = serialize(task) if isinstance(task, list) else task
 
         if self.use_latest_genai_conventions:
+            parts: list[dict[str, Any]] = []
+            if isinstance(task, list):
+                parts = self._map_content_blocks_to_otel_parts(task)
+            else:
+                parts = [{"type": "text", "content": task}]
             self._add_event(
                 span,
                 "gen_ai.client.inference.operation.details",
-                {"gen_ai.input.messages": serialize([{"role": "user", "parts": [{"type": "text", "content": task}]}])},
+                {"gen_ai.input.messages": serialize([{"role": "user", "parts": parts}])},
             )
         else:
            self._add_event(
                span,
                "gen_ai.user.message",
-                event_attributes={"content": content},
+                event_attributes={"content": serialize(task) if isinstance(task, list) else task},
            )
 
         return span
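Note: a rough sketch of what the branch above now emits for the two task shapes (the task values, tool name, and id below are invented for illustration): a plain string task still becomes a single text part, while a list of ContentBlocks is mapped block by block via `_map_content_blocks_to_otel_parts`:

    # String task (illustrative)
    task = "plan a trip"
    # parts == [{"type": "text", "content": "plan a trip"}]

    # ContentBlock-list task (illustrative names/ids)
    task = [
        {"text": "plan a trip"},
        {"toolUse": {"name": "search", "toolUseId": "t1", "input": {"q": "flights"}}},
    ]
    # parts == [
    #     {"type": "text", "content": "plan a trip"},
    #     {"type": "tool_call", "name": "search", "id": "t1", "arguments": {"q": "flights"}},
    # ]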
@@ -727,7 +752,7 @@ def _add_event_messages(self, span: Span, messages: Messages) -> None:
             input_messages: list = []
             for message in messages:
                 input_messages.append(
-                    {"role": message["role"], "parts": [{"type": "text", "content": message["content"]}]}
+                    {"role": message["role"], "parts": self._map_content_blocks_to_otel_parts(message["content"])}
                 )
             self._add_event(
                 span, "gen_ai.client.inference.operation.details", {"gen_ai.input.messages": serialize(input_messages)}
@@ -740,6 +765,41 @@ def _add_event_messages(self, span: Span, messages: Messages) -> None:
                     {"content": serialize(message["content"])},
                 )
 
+    def _map_content_blocks_to_otel_parts(self, content_blocks: list[ContentBlock]) -> list[dict[str, Any]]:
+        """Map ContentBlock objects to OpenTelemetry parts format."""
+        parts: list[dict[str, Any]] = []
+
+        for block in content_blocks:
+            if "text" in block:
+                # Standard TextPart
+                parts.append({"type": "text", "content": block["text"]})
+            elif "toolUse" in block:
+                # Standard ToolCallRequestPart
+                tool_use = block["toolUse"]
+                parts.append(
+                    {
+                        "type": "tool_call",
+                        "name": tool_use["name"],
+                        "id": tool_use["toolUseId"],
+                        "arguments": tool_use["input"],
+                    }
+                )
+            elif "toolResult" in block:
+                # Standard ToolCallResponsePart
+                tool_result = block["toolResult"]
+                parts.append(
+                    {
+                        "type": "tool_call_response",
+                        "id": tool_result["toolUseId"],
+                        "response": tool_result["content"],
+                    }
+                )
+            else:
+                # For all other ContentBlock types, use the key as type and value as content
+                for key, value in block.items():
+                    parts.append({"type": key, "content": value})
+        return parts
+
 
 # Singleton instance for global access
 _tracer_instance = None
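Note: for reference, a rough sketch of what `_map_content_blocks_to_otel_parts` produces for a tool result block and for a block the three explicit branches do not recognize (block values are invented; the Tracer() instance is an assumption):

    from strands.telemetry.tracer import Tracer

    tracer = Tracer()
    blocks = [
        {"toolResult": {"toolUseId": "123", "status": "success", "content": [{"text": "42"}]}},
        {"json": {"answer": 42}},  # no text/toolUse/toolResult key -> fallback branch
    ]
    tracer._map_content_blocks_to_otel_parts(blocks)
    # -> [
    #     {"type": "tool_call_response", "id": "123", "response": [{"text": "42"}]},
    #     {"type": "json", "content": {"answer": 42}},
    # ]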

src/strands/types/event_loop.py

Lines changed: 2 additions & 1 deletion
@@ -28,7 +28,8 @@ class Metrics(TypedDict, total=False):
 
     Attributes:
         latencyMs (int): Latency of the model request in milliseconds.
-        timeToFirstByteMs (int): TimeToFirstByte of the first chunk from the model in milliseconds.
+        timeToFirstByteMs (int): Latency from sending model request to first
+            content chunk (contentBlockDelta or contentBlockStart) from the model in milliseconds.
     """
 
     latencyMs: Required[int]
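Note: a small illustrative sketch of the two shapes this TypedDict allows (numbers are made up); `latencyMs` is Required, while `timeToFirstByteMs` may be omitted when the provider does not report it:

    from strands.types.event_loop import Metrics

    with_ttfb: Metrics = {"latencyMs": 250, "timeToFirstByteMs": 80}
    latency_only: Metrics = {"latencyMs": 250}  # timeToFirstByteMs omitted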

tests/strands/telemetry/test_tracer.py

Lines changed: 48 additions & 19 deletions
@@ -173,7 +173,15 @@ def test_start_model_invoke_span_latest_conventions(mock_tracer):
     mock_span = mock.MagicMock()
     mock_tracer.start_span.return_value = mock_span
 
-    messages = [{"role": "user", "content": [{"text": "Hello"}]}]
+    messages = [
+        {"role": "user", "content": [{"text": "Hello 2025-1993"}]},
+        {
+            "role": "assistant",
+            "content": [
+                {"toolUse": {"input": '"expression": "2025-1993"', "name": "calculator", "toolUseId": "123"}}
+            ],
+        },
+    ]
     model_id = "test-model"
 
     span = tracer.start_model_invoke_span(messages=messages, agent_name="TestAgent", model_id=model_id)
@@ -191,8 +199,19 @@
                 [
                     {
                         "role": messages[0]["role"],
-                        "parts": [{"type": "text", "content": messages[0]["content"]}],
-                    }
+                        "parts": [{"type": "text", "content": "Hello 2025-1993"}],
+                    },
+                    {
+                        "role": messages[1]["role"],
+                        "parts": [
+                            {
+                                "type": "tool_call",
+                                "name": "calculator",
+                                "id": "123",
+                                "arguments": '"expression": "2025-1993"',
+                            }
+                        ],
+                    },
                 ]
             )
         },
@@ -215,8 +234,6 @@ def test_end_model_invoke_span(mock_span):
     mock_span.set_attribute.assert_any_call("gen_ai.usage.completion_tokens", 20)
     mock_span.set_attribute.assert_any_call("gen_ai.usage.output_tokens", 20)
     mock_span.set_attribute.assert_any_call("gen_ai.usage.total_tokens", 30)
-    mock_span.set_attribute.assert_any_call("gen_ai.usage.cache_read_input_tokens", 0)
-    mock_span.set_attribute.assert_any_call("gen_ai.usage.cache_write_input_tokens", 0)
     mock_span.set_attribute.assert_any_call("gen_ai.server.request.duration", 20)
     mock_span.set_attribute.assert_any_call("gen_ai.server.time_to_first_token", 10)
     mock_span.add_event.assert_called_with(
@@ -244,8 +261,6 @@ def test_end_model_invoke_span_latest_conventions(mock_span):
     mock_span.set_attribute.assert_any_call("gen_ai.usage.completion_tokens", 20)
     mock_span.set_attribute.assert_any_call("gen_ai.usage.output_tokens", 20)
     mock_span.set_attribute.assert_any_call("gen_ai.usage.total_tokens", 30)
-    mock_span.set_attribute.assert_any_call("gen_ai.usage.cache_read_input_tokens", 0)
-    mock_span.set_attribute.assert_any_call("gen_ai.usage.cache_write_input_tokens", 0)
     mock_span.set_attribute.assert_any_call("gen_ai.server.time_to_first_token", 10)
     mock_span.set_attribute.assert_any_call("gen_ai.server.request.duration", 20)
     mock_span.add_event.assert_called_with(
@@ -255,7 +270,7 @@ def test_end_model_invoke_span_latest_conventions(mock_span):
                 [
                     {
                         "role": "assistant",
-                        "parts": [{"type": "text", "content": message["content"]}],
+                        "parts": [{"type": "text", "content": "Response"}],
                         "finish_reason": "end_turn",
                     }
                 ]
@@ -324,7 +339,7 @@ def test_start_tool_call_span_latest_conventions(mock_tracer):
                             "type": "tool_call",
                             "name": tool["name"],
                             "id": tool["toolUseId"],
-                            "arguments": [{"content": tool["input"]}],
+                            "arguments": tool["input"],
                         }
                     ],
                 }
@@ -404,7 +419,7 @@ def test_start_swarm_span_with_contentblock_task_latest_conventions(mock_tracer)
         "gen_ai.client.inference.operation.details",
         attributes={
             "gen_ai.input.messages": serialize(
-                [{"role": "user", "parts": [{"type": "text", "content": [{"text": "Original Task: foo bar"}]}]}]
+                [{"role": "user", "parts": [{"type": "text", "content": "Original Task: foo bar"}]}]
             )
         },
     )
@@ -492,7 +507,7 @@ def test_end_tool_call_span_latest_conventions(mock_span):
     """Test ending a tool call span with the latest semantic conventions."""
     tracer = Tracer()
     tracer.use_latest_genai_conventions = True
-    tool_result = {"status": "success", "content": [{"text": "Tool result"}]}
+    tool_result = {"status": "success", "content": [{"text": "Tool result"}, {"json": {"foo": "bar"}}]}
 
     tracer.end_tool_call_span(mock_span, tool_result)
 
@@ -508,7 +523,7 @@
                     {
                         "type": "tool_call_response",
                         "id": tool_result.get("toolUseId", ""),
-                        "result": tool_result.get("content"),
+                        "response": tool_result.get("content"),
                     }
                 ],
             }
@@ -564,9 +579,7 @@ def test_start_event_loop_cycle_span_latest_conventions(mock_tracer):
     mock_span.add_event.assert_any_call(
         "gen_ai.client.inference.operation.details",
         attributes={
-            "gen_ai.input.messages": serialize(
-                [{"role": "user", "parts": [{"type": "text", "content": messages[0]["content"]}]}]
-            )
+            "gen_ai.input.messages": serialize([{"role": "user", "parts": [{"type": "text", "content": "Hello"}]}])
         },
     )
     assert span is not None
@@ -576,7 +589,12 @@ def test_end_event_loop_cycle_span(mock_span):
     """Test ending an event loop cycle span."""
     tracer = Tracer()
     message = {"role": "assistant", "content": [{"text": "Response"}]}
-    tool_result_message = {"role": "assistant", "content": [{"toolResult": {"response": "Success"}}]}
+    tool_result_message = {
+        "role": "assistant",
+        "content": [
+            {"toolResult": {"toolUseId": "123", "status": "success", "content": [{"text": "Weather is sunny"}]}}
+        ],
+    }
 
     tracer.end_event_loop_cycle_span(mock_span, message, tool_result_message)
 
@@ -596,7 +614,12 @@ def test_end_event_loop_cycle_span_latest_conventions(mock_span):
     tracer = Tracer()
     tracer.use_latest_genai_conventions = True
     message = {"role": "assistant", "content": [{"text": "Response"}]}
-    tool_result_message = {"role": "assistant", "content": [{"toolResult": {"response": "Success"}}]}
+    tool_result_message = {
+        "role": "assistant",
+        "content": [
+            {"toolResult": {"toolUseId": "123", "status": "success", "content": [{"text": "Weather is sunny"}]}}
+        ],
+    }
 
     tracer.end_event_loop_cycle_span(mock_span, message, tool_result_message)
 
@@ -607,7 +630,13 @@
             [
                 {
                     "role": "assistant",
-                    "parts": [{"type": "text", "content": tool_result_message["content"]}],
+                    "parts": [
+                        {
+                            "type": "tool_call_response",
+                            "id": "123",
+                            "response": [{"text": "Weather is sunny"}],
+                        }
+                    ],
                 }
             ]
         )
@@ -682,7 +711,7 @@ def test_start_agent_span_latest_conventions(mock_tracer):
         "gen_ai.client.inference.operation.details",
         attributes={
             "gen_ai.input.messages": serialize(
-                [{"role": "user", "parts": [{"type": "text", "content": [{"text": "test prompt"}]}]}]
+                [{"role": "user", "parts": [{"type": "text", "content": "test prompt"}]}]
             )
         },
     )
