
Commit caafc14

Added test
1 parent 04b976a commit caafc14

File tree: 2 files changed, +100 -12 lines changed


sentry_sdk/tracing_utils.py

Lines changed: 32 additions & 12 deletions
@@ -1051,18 +1051,36 @@ def _get_usage_attributes(usage):
     """
     attributes = {}
 
-    if hasattr(usage, "prompt_tokens") and isinstance(usage.prompt_tokens, int):
-        attributes[SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] = usage.prompt_tokens
-    elif hasattr(usage, "input_tokens") and isinstance(usage.input_tokens, int):
-        attributes[SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] = usage.input_tokens
-    elif hasattr(usage, "completion_tokens") and isinstance(
-        usage.completion_tokens, int
-    ):
-        attributes[SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] = usage.output_tokens
-    elif hasattr(usage, "output_tokens") and isinstance(usage.output_tokens, int):
-        attributes[SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] = usage.completion_tokens
-    elif hasattr(usage, "total_tokens") and isinstance(usage.total_tokens, int):
-        attributes[SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] = usage.total_tokens
+    if isinstance(usage, dict):
+        # input tokens
+        if "prompt_tokens" in usage and isinstance(usage["prompt_tokens"], int):
+            attributes[SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] = usage["prompt_tokens"]
+        if "input_tokens" in usage and isinstance(usage["input_tokens"], int):
+            attributes[SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] = usage["input_tokens"]
+        # output tokens
+        if "completion_tokens" in usage and isinstance(usage["completion_tokens"], int):
+            attributes[SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] = usage["completion_tokens"]
+        if "output_tokens" in usage and isinstance(usage["output_tokens"], int):
+            attributes[SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] = usage["output_tokens"]
+        # total tokens
+        if "total_tokens" in usage and isinstance(usage["total_tokens"], int):
+            attributes[SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] = usage["total_tokens"]
+    else:
+        # input tokens
+        if hasattr(usage, "prompt_tokens") and isinstance(usage.prompt_tokens, int):
+            attributes[SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] = usage.prompt_tokens
+        if hasattr(usage, "input_tokens") and isinstance(usage.input_tokens, int):
+            attributes[SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] = usage.input_tokens
+        # output tokens
+        if hasattr(usage, "completion_tokens") and isinstance(
+            usage.completion_tokens, int
+        ):
+            attributes[SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] = usage.completion_tokens
+        if hasattr(usage, "output_tokens") and isinstance(usage.output_tokens, int):
+            attributes[SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] = usage.output_tokens
+        # total tokens
+        if hasattr(usage, "total_tokens") and isinstance(usage.total_tokens, int):
+            attributes[SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] = usage.total_tokens
 
     return attributes
 
@@ -1081,6 +1099,8 @@ def _get_output_attributes(template, send_pii, result):
         elif hasattr(result, "metadata"):
             if hasattr(result.metadata, "usage"):
                 attributes.update(_get_usage_attributes(result.metadata.usage))
+        elif isinstance(result, dict) and "usage" in result:
+            attributes.update(_get_usage_attributes(result["usage"]))
 
     elif hasattr(result, "model") and isinstance(result.model, str):
         attributes[SPANDATA.GEN_AI_RESPONSE_MODEL] = result.model
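
Note: a minimal sketch (not part of the commit) of what the reworked helper does with a plain dict payload. It calls the private _get_usage_attributes helper directly for illustration, and assumes the SPANDATA constants resolve to the gen_ai.usage.* strings asserted by the new test below. The dict branch mirrors the attribute branch, so both OpenAI-style (prompt_tokens/completion_tokens) and Anthropic-style (input_tokens/output_tokens) key names are recognized.

# Illustrative only: exercises the new dict branch of the private helper.
from sentry_sdk.consts import SPANDATA
from sentry_sdk.tracing_utils import _get_usage_attributes

usage = {"prompt_tokens": 10, "completion_tokens": 20, "total_tokens": 30}
attributes = _get_usage_attributes(usage)

assert attributes == {
    SPANDATA.GEN_AI_USAGE_INPUT_TOKENS: 10,   # "gen_ai.usage.input_tokens"
    SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS: 20,  # "gen_ai.usage.output_tokens"
    SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS: 30,   # "gen_ai.usage.total_tokens"
}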

tests/tracing/test_decorator.py

Lines changed: 68 additions & 0 deletions
@@ -3,6 +3,8 @@
 
 import pytest
 
+import sentry_sdk
+from sentry_sdk.consts import SPANTEMPLATE
 from sentry_sdk.tracing import trace
 from sentry_sdk.tracing_utils import create_span_decorator
 from sentry_sdk.utils import logger
@@ -117,3 +119,69 @@ async def _some_function_traced(a, b, c):
     assert inspect.getcallargs(_some_function, 1, 2, 3) == inspect.getcallargs(
         _some_function_traced, 1, 2, 3
     )
+
+
+def test_span_templates(sentry_init, capture_events):
+    sentry_init(traces_sample_rate=1.0)
+    events = capture_events()
+
+    @sentry_sdk.trace(template=SPANTEMPLATE.AI_TOOL)
+    def my_tool(arg1, arg2):
+        return "my_tool_result"
+
+    @sentry_sdk.trace(template=SPANTEMPLATE.AI_CHAT)
+    def my_chat():
+        return {
+            "content": "my_chat_result",
+            "usage": {
+                "prompt_tokens": 10,
+                "completion_tokens": 20,
+                "total_tokens": 30,
+            },
+        }
+
+    @sentry_sdk.trace(template=SPANTEMPLATE.AI_AGENT)
+    def my_agent():
+        my_tool(1, 2)
+        my_chat()
+
+    with sentry_sdk.start_transaction(name="test-transaction"):
+        my_agent()
+
+    (event,) = events
+    (agent_span, tool_span, chat_span) = event["spans"]
+
+    assert agent_span["op"] == "gen_ai.invoke_agent"
+    assert (
+        agent_span["description"]
+        == "invoke_agent test_decorator.test_span_templates.<locals>.my_agent"
+    )
+    assert agent_span["data"] == {
+        "gen_ai.agent.name": "test_decorator.test_span_templates.<locals>.my_agent",
+        "gen_ai.operation.name": "invoke_agent",
+        "thread.id": mock.ANY,
+        "thread.name": mock.ANY,
+    }
+
+    assert tool_span["op"] == "gen_ai.execute_tool"
+    assert (
+        tool_span["description"]
+        == "execute_tool test_decorator.test_span_templates.<locals>.my_tool"
+    )
+    assert tool_span["data"] == {
+        "gen_ai.tool.name": "test_decorator.test_span_templates.<locals>.my_tool",
+        "gen_ai.operation.name": "execute_tool",
+        "thread.id": mock.ANY,
+        "thread.name": mock.ANY,
+    }
+
+    assert chat_span["op"] == "gen_ai.chat"
+    assert chat_span["description"] == "chat"
+    assert chat_span["data"] == {
+        "gen_ai.operation.name": "chat",
+        "gen_ai.usage.input_tokens": 10,
+        "gen_ai.usage.output_tokens": 20,
+        "gen_ai.usage.total_tokens": 30,
+        "thread.id": mock.ANY,
+        "thread.name": mock.ANY,
+    }
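
Note: a hedged sketch (not from the commit) of how the span templates exercised by this test might be used in application code. Function names and token counts are illustrative; it assumes the SDK is initialized with tracing enabled, which is what the test's sentry_init fixture does.

import sentry_sdk
from sentry_sdk.consts import SPANTEMPLATE

sentry_sdk.init(traces_sample_rate=1.0)  # supply your project's dsn=... as usual

@sentry_sdk.trace(template=SPANTEMPLATE.AI_CHAT)
def summarize(text):
    # Returning a plain dict with a "usage" key is enough for the SDK to
    # attach gen_ai.usage.* token counts to the chat span after this change.
    return {
        "content": "summary of " + text,
        "usage": {"prompt_tokens": 12, "completion_tokens": 4, "total_tokens": 16},
    }

@sentry_sdk.trace(template=SPANTEMPLATE.AI_AGENT)
def run_agent(text):
    return summarize(text)

with sentry_sdk.start_transaction(name="agent-run"):
    run_agent("some long document")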
