@@ -31,60 +31,60 @@ def _capture_exception(exc):
 
 
 def _set_agent_data(span, agent):
     # type: (sentry_sdk.tracing.Span, agents.Agent) -> None
-    span.set_data(
+    span.set_attribute(
         SPANDATA.GEN_AI_SYSTEM, "openai"
     )  # See footnote for https://opentelemetry.io/docs/specs/semconv/registry/attributes/gen-ai/#gen-ai-system for explanation why.
 
-    span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent.name)
+    span.set_attribute(SPANDATA.GEN_AI_AGENT_NAME, agent.name)
 
     if agent.model_settings.max_tokens:
-        span.set_data(
+        span.set_attribute(
             SPANDATA.GEN_AI_REQUEST_MAX_TOKENS, agent.model_settings.max_tokens
         )
 
     if agent.model:
-        span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, agent.model)
+        span.set_attribute(SPANDATA.GEN_AI_REQUEST_MODEL, agent.model)
 
     if agent.model_settings.presence_penalty:
-        span.set_data(
+        span.set_attribute(
             SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY,
             agent.model_settings.presence_penalty,
         )
 
     if agent.model_settings.temperature:
-        span.set_data(
+        span.set_attribute(
             SPANDATA.GEN_AI_REQUEST_TEMPERATURE, agent.model_settings.temperature
         )
 
     if agent.model_settings.top_p:
-        span.set_data(SPANDATA.GEN_AI_REQUEST_TOP_P, agent.model_settings.top_p)
+        span.set_attribute(SPANDATA.GEN_AI_REQUEST_TOP_P, agent.model_settings.top_p)
 
     if agent.model_settings.frequency_penalty:
-        span.set_data(
+        span.set_attribute(
             SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY,
             agent.model_settings.frequency_penalty,
         )
 
     if len(agent.tools) > 0:
-        span.set_data(
+        span.set_attribute(
             SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS,
             safe_serialize([vars(tool) for tool in agent.tools]),
         )
 
 
 def _set_usage_data(span, usage):
     # type: (sentry_sdk.tracing.Span, Usage) -> None
-    span.set_data(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, usage.input_tokens)
-    span.set_data(
+    span.set_attribute(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, usage.input_tokens)
+    span.set_attribute(
         SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED,
         usage.input_tokens_details.cached_tokens,
     )
-    span.set_data(SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS, usage.output_tokens)
-    span.set_data(
+    span.set_attribute(SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS, usage.output_tokens)
+    span.set_attribute(
         SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING,
         usage.output_tokens_details.reasoning_tokens,
     )
-    span.set_data(SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS, usage.total_tokens)
+    span.set_attribute(SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS, usage.total_tokens)
 
 
 def _set_input_data(span, get_response_kwargs):
@@ -118,7 +118,9 @@ def _set_input_data(span, get_response_kwargs):
     if len(messages) > 0:
         request_messages.append({"role": role, "content": messages})
 
-    span.set_data(SPANDATA.GEN_AI_REQUEST_MESSAGES, safe_serialize(request_messages))
+    span.set_attribute(
+        SPANDATA.GEN_AI_REQUEST_MESSAGES, safe_serialize(request_messages)
+    )
 
 
 def _set_output_data(span, result):
@@ -143,12 +145,12 @@ def _set_output_data(span, result):
         output_messages["response"].append(output_message.dict())
 
     if len(output_messages["tool"]) > 0:
-        span.set_data(
+        span.set_attribute(
             SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, safe_serialize(output_messages["tool"])
         )
 
     if len(output_messages["response"]) > 0:
-        span.set_data(
+        span.set_attribute(
             SPANDATA.GEN_AI_RESPONSE_TEXT, safe_serialize(output_messages["response"])
         )
 
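A minimal sketch (not part of this commit) of the span API the change migrates to, assuming an SDK version where Span.set_attribute is available; the op string, model name, and temperature values below are illustrative only:

import sentry_sdk
from sentry_sdk.consts import SPANDATA

# Illustrative setup; a real app would also configure a DSN here.
sentry_sdk.init(traces_sample_rate=1.0)

# Record GenAI request metadata on a span via set_attribute, mirroring what
# _set_agent_data does for an agents.Agent instance.
with sentry_sdk.start_span(op="gen_ai.invoke_agent") as span:
    span.set_attribute(SPANDATA.GEN_AI_SYSTEM, "openai")
    span.set_attribute(SPANDATA.GEN_AI_REQUEST_MODEL, "gpt-4o")  # hypothetical model name
    span.set_attribute(SPANDATA.GEN_AI_REQUEST_TEMPERATURE, 0.2)  # hypothetical setting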