@@ -121,31 +121,128 @@ async def _some_function_traced(a, b, c):
121121 )
122122
123123
def test_span_templates_ai_dicts(sentry_init, capture_events):
    """Verify SPANTEMPLATE-decorated functions emit correct gen_ai spans when
    the traced functions return plain dicts (tool output, chat usage/model).

    Covers three templates: AI_TOOL, AI_CHAT, and AI_AGENT. The agent calls
    the tool and the chat function, so one transaction yields three child
    spans whose op, description, and data attributes are asserted below.
    """
    sentry_init(traces_sample_rate=1.0)
    events = capture_events()

    @sentry_sdk.trace(template=SPANTEMPLATE.AI_TOOL)
    def my_tool(arg1, arg2):
        # Dict result: "usage" uses prompt/completion token key names, which
        # the SDK should normalize to gen_ai.usage.input/output_tokens.
        return {
            "output": "my_tool_result",
            "usage": {
                "prompt_tokens": 10,
                "completion_tokens": 20,
                "total_tokens": 30,
            },
        }

    @sentry_sdk.trace(template=SPANTEMPLATE.AI_CHAT)
    def my_chat(model=None, **kwargs):
        # Dict result: "usage" uses input/output token key names, and "model"
        # should surface as gen_ai.response.model on the chat span.
        return {
            "content": "my_chat_result",
            "usage": {
                "input_tokens": 11,
                "output_tokens": 22,
                "total_tokens": 33,
            },
            "model": f"{model}-v123",
        }

    @sentry_sdk.trace(template=SPANTEMPLATE.AI_AGENT)
    def my_agent():
        my_tool(1, 2)
        # Keyword arguments here should be mapped onto gen_ai.request.*
        # attributes of the chat span (see chat_span assertions below).
        my_chat(
            model="my-gpt-4o-mini",
            prompt="What is the weather in Tokyo?",
            system_prompt="You are a helpful assistant that can answer questions about the weather.",
            max_tokens=100,
            temperature=0.5,
            top_p=0.9,
            top_k=40,
            frequency_penalty=1.0,
            presence_penalty=2.0,
        )

    with sentry_sdk.start_transaction(name="test-transaction"):
        my_agent()

    # Exactly one transaction event; spans arrive as agent, tool, chat.
    (event,) = events
    (agent_span, tool_span, chat_span) = event["spans"]

    assert agent_span["op"] == "gen_ai.invoke_agent"
    assert (
        agent_span["description"]
        == "invoke_agent test_decorator.test_span_templates_ai_dicts.<locals>.my_agent"
    )
    assert agent_span["data"] == {
        "gen_ai.agent.name": "test_decorator.test_span_templates_ai_dicts.<locals>.my_agent",
        "gen_ai.operation.name": "invoke_agent",
        "thread.id": mock.ANY,
        "thread.name": mock.ANY,
    }

    assert tool_span["op"] == "gen_ai.execute_tool"
    assert (
        tool_span["description"]
        == "execute_tool test_decorator.test_span_templates_ai_dicts.<locals>.my_tool"
    )
    # prompt/completion token counts from the dict are normalized to the
    # gen_ai.usage.input/output_tokens attribute names.
    assert tool_span["data"] == {
        "gen_ai.tool.name": "test_decorator.test_span_templates_ai_dicts.<locals>.my_tool",
        "gen_ai.operation.name": "execute_tool",
        "gen_ai.usage.input_tokens": 10,
        "gen_ai.usage.output_tokens": 20,
        "gen_ai.usage.total_tokens": 30,
        "thread.id": mock.ANY,
        "thread.name": mock.ANY,
    }

    assert chat_span["op"] == "gen_ai.chat"
    assert chat_span["description"] == "chat my-gpt-4o-mini"
    # Request kwargs (model, penalties, sampling params) appear as
    # gen_ai.request.*; prompt and system_prompt are folded into a single
    # stringified messages list; the result dict's "model" becomes
    # gen_ai.response.model.
    assert chat_span["data"] == {
        "gen_ai.operation.name": "chat",
        "gen_ai.request.frequency_penalty": 1.0,
        "gen_ai.request.max_tokens": 100,
        "gen_ai.request.messages": "[{'role': 'user', 'content': 'What is the weather in Tokyo?'}, {'role': 'system', 'content': 'You are a helpful assistant that can answer questions about the weather.'}]",
        "gen_ai.request.model": "my-gpt-4o-mini",
        "gen_ai.request.presence_penalty": 2.0,
        "gen_ai.request.temperature": 0.5,
        "gen_ai.request.top_k": 40,
        "gen_ai.request.top_p": 0.9,
        "gen_ai.response.model": "my-gpt-4o-mini-v123",
        "gen_ai.usage.input_tokens": 11,
        "gen_ai.usage.output_tokens": 22,
        "gen_ai.usage.total_tokens": 33,
        "thread.id": mock.ANY,
        "thread.name": mock.ANY,
    }
218+
219+
220+ def test_span_templates_ai_objects (sentry_init , capture_events ):
221+ sentry_init (traces_sample_rate = 1.0 )
222+ events = capture_events ()
223+
224+ @sentry_sdk .trace (template = SPANTEMPLATE .AI_TOOL )
225+ def my_tool (arg1 , arg2 ):
226+ mock_usage = mock .Mock ()
227+ mock_usage .prompt_tokens = 10
228+ mock_usage .completion_tokens = 20
229+ mock_usage .total_tokens = 30
230+
231+ mock_result = mock .Mock ()
232+ mock_result .output = "my_tool_result"
233+ mock_result .usage = mock_usage
234+
235+ return mock_result
236+
140237 @sentry_sdk .trace (template = SPANTEMPLATE .AI_CHAT )
141238 def my_chat (model = None , ** kwargs ):
142239 mock_result = mock .Mock ()
143240 mock_result .content = "my_chat_result"
144- mock_result .usage = {
145- "prompt_tokens" : 10 ,
146- "completion_tokens" : 20 ,
147- " total_tokens" : 30 ,
148- }
241+ mock_result .usage = mock . Mock (
242+ input_tokens = 11 ,
243+ output_tokens = 22 ,
244+ total_tokens = 33 ,
245+ )
149246 mock_result .model = f"{ model } -v123"
150247
151248 return mock_result
@@ -174,10 +271,10 @@ def my_agent():
174271 assert agent_span ["op" ] == "gen_ai.invoke_agent"
175272 assert (
176273 agent_span ["description" ]
177- == "invoke_agent test_decorator.test_span_templates .<locals>.my_agent"
274+ == "invoke_agent test_decorator.test_span_templates_ai_objects .<locals>.my_agent"
178275 )
179276 assert agent_span ["data" ] == {
180- "gen_ai.agent.name" : "test_decorator.test_span_templates .<locals>.my_agent" ,
277+ "gen_ai.agent.name" : "test_decorator.test_span_templates_ai_objects .<locals>.my_agent" ,
181278 "gen_ai.operation.name" : "invoke_agent" ,
182279 "thread.id" : mock .ANY ,
183280 "thread.name" : mock .ANY ,
@@ -186,14 +283,14 @@ def my_agent():
186283 assert tool_span ["op" ] == "gen_ai.execute_tool"
187284 assert (
188285 tool_span ["description" ]
189- == "execute_tool test_decorator.test_span_templates .<locals>.my_tool"
286+ == "execute_tool test_decorator.test_span_templates_ai_objects .<locals>.my_tool"
190287 )
191288 assert tool_span ["data" ] == {
192- "gen_ai.tool.name" : "test_decorator.test_span_templates .<locals>.my_tool" ,
289+ "gen_ai.tool.name" : "test_decorator.test_span_templates_ai_objects .<locals>.my_tool" ,
193290 "gen_ai.operation.name" : "execute_tool" ,
194- "gen_ai.usage.input_tokens" : 11 ,
195- "gen_ai.usage.output_tokens" : 22 ,
196- "gen_ai.usage.total_tokens" : 33 ,
291+ "gen_ai.usage.input_tokens" : 10 ,
292+ "gen_ai.usage.output_tokens" : 20 ,
293+ "gen_ai.usage.total_tokens" : 30 ,
197294 "thread.id" : mock .ANY ,
198295 "thread.name" : mock .ANY ,
199296 }
@@ -211,9 +308,9 @@ def my_agent():
211308 "gen_ai.request.top_k" : 40 ,
212309 "gen_ai.request.top_p" : 0.9 ,
213310 "gen_ai.response.model" : "my-gpt-4o-mini-v123" ,
214- "gen_ai.usage.input_tokens" : 10 ,
215- "gen_ai.usage.output_tokens" : 20 ,
216- "gen_ai.usage.total_tokens" : 30 ,
311+ "gen_ai.usage.input_tokens" : 11 ,
312+ "gen_ai.usage.output_tokens" : 22 ,
313+ "gen_ai.usage.total_tokens" : 33 ,
217314 "thread.id" : mock .ANY ,
218315 "thread.name" : mock .ANY ,
219316 }
0 commit comments