@@ -121,13 +121,13 @@ def _add_ai_data_to_span(
     with capture_internal_exceptions():
         if should_send_default_pii() and integration.include_prompts:
             complete_message = "".join(content_blocks)
-            span.set_data(
+            span.set_attribute(
                 SPANDATA.AI_RESPONSES,
                 [{"type": "text", "text": complete_message}],
             )
         total_tokens = input_tokens + output_tokens
         record_token_usage(span, input_tokens, output_tokens, total_tokens)
-        span.set_data(SPANDATA.AI_STREAMING, True)
+        span.set_attribute(SPANDATA.AI_STREAMING, True)


 def _sentry_patched_create_common(f, *args, **kwargs):
@@ -159,15 +159,17 @@ def _sentry_patched_create_common(f, *args, **kwargs):
     model = kwargs.get("model")

     with capture_internal_exceptions():
-        span.set_data(SPANDATA.AI_MODEL_ID, model)
-        span.set_data(SPANDATA.AI_STREAMING, False)
+        span.set_attribute(SPANDATA.AI_MODEL_ID, model)
+        span.set_attribute(SPANDATA.AI_STREAMING, False)

         if should_send_default_pii() and integration.include_prompts:
-            span.set_data(SPANDATA.AI_INPUT_MESSAGES, messages)
+            span.set_attribute(SPANDATA.AI_INPUT_MESSAGES, messages)

         if hasattr(result, "content"):
             if should_send_default_pii() and integration.include_prompts:
-                span.set_data(SPANDATA.AI_RESPONSES, _get_responses(result.content))
+                span.set_attribute(
+                    SPANDATA.AI_RESPONSES, _get_responses(result.content)
+                )
             _calculate_token_usage(result, span)
             span.__exit__(None, None, None)

@@ -215,7 +217,7 @@ async def new_iterator_async():
                 result._iterator = new_iterator()

         else:
-            span.set_data("unknown_response", True)
+            span.set_attribute("unknown_response", True)
             span.__exit__(None, None, None)

         return result
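The change is mechanical: every span.set_data(key, value) call becomes span.set_attribute(key, value) with the same key and value. As a rough sketch only, assuming an SDK build where spans expose set_attribute (the API this diff migrates to) and using a made-up op string and model id, equivalent manual instrumentation in the new style would look like:

import sentry_sdk
from sentry_sdk.consts import SPANDATA

# Illustrative sketch, not part of this commit: record the same AI metadata
# on a manually started span via set_attribute. The op string and model id
# here are assumptions for the example.
with sentry_sdk.start_span(op="ai.chat_completions.create.anthropic") as span:
    span.set_attribute(SPANDATA.AI_MODEL_ID, "claude-3-5-sonnet-20240620")
    span.set_attribute(SPANDATA.AI_STREAMING, False)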