 from functools import wraps
+import json
 from typing import TYPE_CHECKING
 
 import sentry_sdk
 from sentry_sdk.ai.monitoring import record_token_usage
+from sentry_sdk.ai.utils import set_data_normalized
 from sentry_sdk.consts import OP, SPANDATA
 from sentry_sdk.integrations import _check_minimum_version, DidNotEnable, Integration
 from sentry_sdk.scope import should_send_default_pii
 from sentry_sdk.utils import (
     capture_internal_exceptions,
     event_from_exception,
     package_version,
+    safe_serialize,
 )
 
 try:
+    try:
+        from anthropic import NOT_GIVEN
+    except ImportError:
+        NOT_GIVEN = None
+
     from anthropic.resources import AsyncMessages, Messages
 
     if TYPE_CHECKING:
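The nested try/except lets the integration run against anthropic SDK versions that predate the NOT_GIVEN sentinel. A minimal sketch of the guarded-sentinel pattern, assuming only that NOT_GIVEN marks "parameter not supplied"; the supplied_params helper is hypothetical, not part of the SDK:

try:
    from anthropic import NOT_GIVEN
except ImportError:
    NOT_GIVEN = None  # older anthropic versions: treat "not given" like None


def supplied_params(kwargs, keys):
    # Hypothetical helper: keep only values the caller explicitly passed.
    return {
        key: kwargs[key]
        for key in keys
        if kwargs.get(key) is not NOT_GIVEN and kwargs.get(key) is not None
    }


print(supplied_params({"temperature": 0.2, "top_p": None}, ["temperature", "top_p"]))
# {'temperature': 0.2}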
@@ -53,8 +61,11 @@ def _capture_exception(exc):
     sentry_sdk.capture_event(event, hint=hint)
 
 
-def _calculate_token_usage(result, span):
-    # type: (Messages, Span) -> None
+def _get_token_usage(result):
+    # type: (Messages) -> tuple[int, int]
+    """
+    Get token usage from the Anthropic response.
+    """
     input_tokens = 0
     output_tokens = 0
     if hasattr(result, "usage"):
@@ -64,44 +75,21 @@ def _calculate_token_usage(result, span):
         if hasattr(usage, "output_tokens") and isinstance(usage.output_tokens, int):
             output_tokens = usage.output_tokens
 
-    total_tokens = input_tokens + output_tokens
+    return input_tokens, output_tokens
 
-    record_token_usage(
-        span,
-        input_tokens=input_tokens,
-        output_tokens=output_tokens,
-        total_tokens=total_tokens,
-    )
 
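Where _calculate_token_usage recorded totals on the span directly, _get_token_usage now only extracts the counts so callers decide when to record them. A runnable sketch of the defensive extraction against a stand-in response (SimpleNamespace is a test double, not the SDK type):

from types import SimpleNamespace

# Stand-in for an Anthropic Message; only the attributes the helper reads.
fake_result = SimpleNamespace(
    usage=SimpleNamespace(input_tokens=12, output_tokens=34)
)

input_tokens = output_tokens = 0
if hasattr(fake_result, "usage"):
    usage = fake_result.usage
    if isinstance(getattr(usage, "input_tokens", None), int):
        input_tokens = usage.input_tokens
    if isinstance(getattr(usage, "output_tokens", None), int):
        output_tokens = usage.output_tokens

print(input_tokens, output_tokens)  # 12 34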
-
-def _get_responses(content):
-    # type: (list[Any]) -> list[dict[str, Any]]
+def _collect_ai_data(event, model, input_tokens, output_tokens, content_blocks):
+    # type: (MessageStreamEvent, str | None, int, int, list[str]) -> tuple[str | None, int, int, list[str]]
     """
-    Get JSON of a Anthropic responses.
-    """
-    responses = []
-    for item in content:
-        if hasattr(item, "text"):
-            responses.append(
-                {
-                    "type": item.type,
-                    "text": item.text,
-                }
-            )
-    return responses
-
-
-def _collect_ai_data(event, input_tokens, output_tokens, content_blocks):
-    # type: (MessageStreamEvent, int, int, list[str]) -> tuple[int, int, list[str]]
-    """
-    Count token usage and collect content blocks from the AI streaming response.
+    Collect model information, token usage, and content blocks from the AI streaming response.
     """
     with capture_internal_exceptions():
         if hasattr(event, "type"):
             if event.type == "message_start":
                 usage = event.message.usage
                 input_tokens += usage.input_tokens
                 output_tokens += usage.output_tokens
+                model = event.message.model or model
             elif event.type == "content_block_start":
                 pass
             elif event.type == "content_block_delta":
@@ -114,31 +102,80 @@ def _collect_ai_data(event, input_tokens, output_tokens, content_blocks):
             elif event.type == "message_delta":
                 output_tokens += event.usage.output_tokens
 
-    return input_tokens, output_tokens, content_blocks
+    return model, input_tokens, output_tokens, content_blocks
 
 
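Threading model through the accumulator means the response model name is captured from the first message_start event. A self-contained sketch of the accumulation loop over stand-in stream events (SimpleNamespace doubles for the SDK's event types; the real helper handles more event kinds):

from types import SimpleNamespace

events = [
    SimpleNamespace(
        type="message_start",
        message=SimpleNamespace(
            model="claude-3-5-sonnet",
            usage=SimpleNamespace(input_tokens=10, output_tokens=1),
        ),
    ),
    SimpleNamespace(type="content_block_delta", delta=SimpleNamespace(text="Hello")),
    SimpleNamespace(type="message_delta", usage=SimpleNamespace(output_tokens=5)),
]

model, input_tokens, output_tokens, blocks = None, 0, 0, []
for event in events:
    if event.type == "message_start":
        model = event.message.model or model
        input_tokens += event.message.usage.input_tokens
        output_tokens += event.message.usage.output_tokens
    elif event.type == "content_block_delta":
        blocks.append(event.delta.text)
    elif event.type == "message_delta":
        output_tokens += event.usage.output_tokens

print(model, input_tokens, output_tokens, "".join(blocks))
# claude-3-5-sonnet 10 6 Hello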
-def _add_ai_data_to_span(
-    span, integration, input_tokens, output_tokens, content_blocks
-):
-    # type: (Span, AnthropicIntegration, int, int, list[str]) -> None
+def _set_input_data(span, kwargs, integration):
+    # type: (Span, dict[str, Any], AnthropicIntegration) -> None
     """
-    Add token usage and content blocks from the AI streaming response to the span.
+    Set input data for the span based on the provided keyword arguments for the Anthropic message creation.
     """
-    with capture_internal_exceptions():
-        if should_send_default_pii() and integration.include_prompts:
-            complete_message = "".join(content_blocks)
-            span.set_data(
-                SPANDATA.AI_RESPONSES,
-                [{"type": "text", "text": complete_message}],
-            )
-        total_tokens = input_tokens + output_tokens
-        record_token_usage(
+    messages = kwargs.get("messages")
+    if (
+        messages is not None
+        and len(messages) > 0
+        and should_send_default_pii()
+        and integration.include_prompts
+    ):
+        set_data_normalized(
+            span, SPANDATA.GEN_AI_REQUEST_MESSAGES, safe_serialize(messages)
+        )
+
+    set_data_normalized(
+        span, SPANDATA.GEN_AI_RESPONSE_STREAMING, kwargs.get("stream", False)
+    )
+
+    kwargs_keys_to_attributes = {
+        "max_tokens": SPANDATA.GEN_AI_REQUEST_MAX_TOKENS,
+        "model": SPANDATA.GEN_AI_REQUEST_MODEL,
+        "temperature": SPANDATA.GEN_AI_REQUEST_TEMPERATURE,
+        "top_k": SPANDATA.GEN_AI_REQUEST_TOP_K,
+        "top_p": SPANDATA.GEN_AI_REQUEST_TOP_P,
+    }
+    for key, attribute in kwargs_keys_to_attributes.items():
+        value = kwargs.get(key)
+        if value is not NOT_GIVEN and value is not None:
+            set_data_normalized(span, attribute, value)
+
+    # Input attributes: Tools
+    tools = kwargs.get("tools")
+    if tools is not NOT_GIVEN and tools is not None and len(tools) > 0:
+        set_data_normalized(
+            span, SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, safe_serialize(tools)
+        )
+
+
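A short sketch of the request-parameter mapping above, with stand-ins for the span plumbing (the recorder and the literal attribute strings are illustrative assumptions, not the SDK internals), showing that only explicitly supplied parameters land on the span:

# Stand-in recorder; the real code calls set_data_normalized on a Span.
recorded = {}

kwargs = {"model": "claude-3-5-sonnet", "max_tokens": 100, "temperature": None}
for key, attribute in {
    "max_tokens": "gen_ai.request.max_tokens",
    "model": "gen_ai.request.model",
    "temperature": "gen_ai.request.temperature",
}.items():
    value = kwargs.get(key)
    if value is not None:  # the NOT_GIVEN sentinel check is elided in this sketch
        recorded[attribute] = value

print(recorded)
# {'gen_ai.request.max_tokens': 100, 'gen_ai.request.model': 'claude-3-5-sonnet'}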
+def _set_output_data(
+    span,
+    integration,
+    model,
+    input_tokens,
+    output_tokens,
+    content_blocks,
+    finish_span=False,
+):
+    # type: (Span, AnthropicIntegration, str | None, int | None, int | None, list[Any], bool) -> None
+    """
+    Set output data for the span based on the AI response.
+    """
+    span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, model)
+    if should_send_default_pii() and integration.include_prompts:
+        set_data_normalized(
             span,
-            input_tokens=input_tokens,
-            output_tokens=output_tokens,
-            total_tokens=total_tokens,
+            SPANDATA.GEN_AI_RESPONSE_TEXT,
+            json.dumps(content_blocks),
+            unpack=False,
         )
-        span.set_data(SPANDATA.AI_STREAMING, True)
+
+    record_token_usage(
+        span,
+        input_tokens=input_tokens,
+        output_tokens=output_tokens,
+    )
+
+    # TODO: GEN_AI_RESPONSE_TOOL_CALLS ?
+
+    if finish_span:
+        span.__exit__(None, None, None)
 
 
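The finish_span flag lets one helper serve both paths: regular responses close the span immediately, while the stream wrappers defer closing until the stream is exhausted. A minimal sketch of that contract, assuming only a span object with the context-manager exit:

class FakeSpan:
    # Test double exposing just the __exit__ hook the helper uses.
    def __exit__(self, exc_type, exc_value, tb):
        print("span finished")


def set_output(span, finish_span=False):
    # ...record response data on the span here...
    if finish_span:
        span.__exit__(None, None, None)


set_output(FakeSpan())                     # stays open; a stream wrapper closes it later
set_output(FakeSpan(), finish_span=True)   # prints "span finished"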
 def _sentry_patched_create_common(f, *args, **kwargs):
@@ -155,69 +192,95 @@ def _sentry_patched_create_common(f, *args, **kwargs):
     except TypeError:
         return f(*args, **kwargs)
 
+    model = kwargs.get("model", "")
+
     span = sentry_sdk.start_span(
-        op=OP.ANTHROPIC_MESSAGES_CREATE,
-        description="Anthropic messages create",
+        op=OP.GEN_AI_CHAT,
+        name=f"chat {model}".strip(),
         origin=AnthropicIntegration.origin,
     )
     span.__enter__()
 
-    result = yield f, args, kwargs
+    _set_input_data(span, kwargs, integration)
 
-    # add data to span and finish it
-    messages = list(kwargs["messages"])
-    model = kwargs.get("model")
+    result = yield f, args, kwargs
 
     with capture_internal_exceptions():
-        span.set_data(SPANDATA.AI_MODEL_ID, model)
-        span.set_data(SPANDATA.AI_STREAMING, False)
-
-        if should_send_default_pii() and integration.include_prompts:
-            span.set_data(SPANDATA.AI_INPUT_MESSAGES, messages)
-
         if hasattr(result, "content"):
-            if should_send_default_pii() and integration.include_prompts:
-                span.set_data(SPANDATA.AI_RESPONSES, _get_responses(result.content))
-            _calculate_token_usage(result, span)
-            span.__exit__(None, None, None)
+            input_tokens, output_tokens = _get_token_usage(result)
+
+            content_blocks = []
+            for content_block in result.content:
+                if hasattr(content_block, "to_dict"):
+                    content_blocks.append(content_block.to_dict())
+                elif hasattr(content_block, "model_dump"):
+                    content_blocks.append(content_block.model_dump())
+                elif hasattr(content_block, "text"):
+                    content_blocks.append({"type": "text", "text": content_block.text})
+
+            _set_output_data(
+                span=span,
+                integration=integration,
+                model=getattr(result, "model", None),
+                input_tokens=input_tokens,
+                output_tokens=output_tokens,
+                content_blocks=content_blocks,
+                finish_span=True,
+            )
 
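The to_dict / model_dump / .text chain above degrades gracefully across content-block implementations. A sketch of the same duck-typing order with plain stand-in classes:

class WithToDict:
    def to_dict(self):
        return {"type": "text", "text": "a"}


class WithModelDump:
    def model_dump(self):
        return {"type": "text", "text": "b"}


class TextOnly:
    type = "text"
    text = "c"


blocks = []
for block in (WithToDict(), WithModelDump(), TextOnly()):
    if hasattr(block, "to_dict"):
        blocks.append(block.to_dict())
    elif hasattr(block, "model_dump"):
        blocks.append(block.model_dump())
    elif hasattr(block, "text"):
        blocks.append({"type": "text", "text": block.text})

print(blocks)
# [{'type': 'text', 'text': 'a'}, {'type': 'text', 'text': 'b'}, {'type': 'text', 'text': 'c'}]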
         # Streaming response
         elif hasattr(result, "_iterator"):
             old_iterator = result._iterator
 
             def new_iterator():
                 # type: () -> Iterator[MessageStreamEvent]
+                model = None
                 input_tokens = 0
                 output_tokens = 0
                 content_blocks = []  # type: list[str]
 
                 for event in old_iterator:
-                    input_tokens, output_tokens, content_blocks = _collect_ai_data(
-                        event, input_tokens, output_tokens, content_blocks
+                    model, input_tokens, output_tokens, content_blocks = (
+                        _collect_ai_data(
+                            event, model, input_tokens, output_tokens, content_blocks
+                        )
                     )
                     yield event
 
-                _add_ai_data_to_span(
-                    span, integration, input_tokens, output_tokens, content_blocks
+                _set_output_data(
+                    span=span,
+                    integration=integration,
+                    model=model,
+                    input_tokens=input_tokens,
+                    output_tokens=output_tokens,
+                    content_blocks=[{"text": "".join(content_blocks), "type": "text"}],
+                    finish_span=True,
                 )
-                span.__exit__(None, None, None)
 
             async def new_iterator_async():
                 # type: () -> AsyncIterator[MessageStreamEvent]
+                model = None
                 input_tokens = 0
                 output_tokens = 0
                 content_blocks = []  # type: list[str]
 
                 async for event in old_iterator:
-                    input_tokens, output_tokens, content_blocks = _collect_ai_data(
-                        event, input_tokens, output_tokens, content_blocks
+                    model, input_tokens, output_tokens, content_blocks = (
+                        _collect_ai_data(
+                            event, model, input_tokens, output_tokens, content_blocks
+                        )
                     )
                     yield event
 
-                _add_ai_data_to_span(
-                    span, integration, input_tokens, output_tokens, content_blocks
+                _set_output_data(
+                    span=span,
+                    integration=integration,
+                    model=model,
+                    input_tokens=input_tokens,
+                    output_tokens=output_tokens,
+                    content_blocks=[{"text": "".join(content_blocks), "type": "text"}],
+                    finish_span=True,
                 )
-                span.__exit__(None, None, None)
 
         if str(type(result._iterator)) == "<class 'async_generator'>":
             result._iterator = new_iterator_async()
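The core trick in both wrappers is replacing result._iterator with a generator that yields every event unchanged and records the accumulated data only once the stream is exhausted. A self-contained sketch of that pattern, assuming nothing beyond plain iterators:

def wrap_stream(events):
    # Pass each event through untouched; summarize only after exhaustion,
    # so instrumentation never alters what the caller sees.
    def wrapped():
        count = 0
        for event in events:
            count += 1
            yield event
        print(f"stream done after {count} events")  # stand-in for _set_output_data(..., finish_span=True)

    return wrapped()


for _ in wrap_stream(iter(["a", "b", "c"])):
    pass
# stream done after 3 events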