@@ -404,7 +404,10 @@ def test_bad_chat_completion(sentry_init, capture_events):
404404 model = "some-model" , messages = [{"role" : "system" , "content" : "hello" }]
405405 )
406406
407- (event ,) = events
407+ (
408+ _ ,
409+ event ,
410+ ) = events
408411 assert event ["level" ] == "error"
409412
410413
@@ -422,7 +425,10 @@ async def test_bad_chat_completion_async(sentry_init, capture_events):
422425 model = "some-model" , messages = [{"role" : "system" , "content" : "hello" }]
423426 )
424427
425- (event ,) = events
428+ (
429+ _ ,
430+ event ,
431+ ) = events
426432 assert event ["level" ] == "error"
427433
428434
@@ -545,7 +551,10 @@ def test_embeddings_create_raises_error(
545551 with pytest .raises (OpenAIError ):
546552 client .embeddings .create (input = "hello" , model = "text-embedding-3-large" )
547553
548- (event ,) = events
554+ (
555+ _ ,
556+ event ,
557+ ) = events
549558 assert event ["level" ] == "error"
550559
551560
@@ -574,7 +583,10 @@ async def test_embeddings_create_raises_error_async(
574583 with pytest .raises (OpenAIError ):
575584 await client .embeddings .create (input = "hello" , model = "text-embedding-3-large" )
576585
577- (event ,) = events
586+ (
587+ _ ,
588+ event ,
589+ ) = events
578590 assert event ["level" ] == "error"
579591
580592
@@ -995,6 +1007,10 @@ def test_ai_client_span_responses_api_no_pii(sentry_init, capture_events):
9951007 "gen_ai.usage.output_tokens" : 10 ,
9961008 "gen_ai.usage.output_tokens.reasoning" : 8 ,
9971009 "gen_ai.usage.total_tokens" : 30 ,
1010+ "sentry.name" : "responses gpt-4o" ,
1011+ "sentry.op" : "gen_ai.responses" ,
1012+ "sentry.origin" : "auto.ai.openai" ,
1013+ "sentry.source" : "custom" ,
9981014 "thread.id" : mock .ANY ,
9991015 "thread.name" : mock .ANY ,
10001016 }
@@ -1040,6 +1056,10 @@ def test_ai_client_span_responses_api(sentry_init, capture_events):
10401056 "gen_ai.usage.output_tokens.reasoning" : 8 ,
10411057 "gen_ai.usage.total_tokens" : 30 ,
10421058 "gen_ai.response.text" : '[{"id": "message-id", "content": [{"annotations": [], "text": "the model response", "type": "output_text"}], "role": "assistant", "status": "completed", "type": "message"}]' ,
1059+ "sentry.name" : "responses gpt-4o" ,
1060+ "sentry.op" : "gen_ai.responses" ,
1061+ "sentry.origin" : "auto.ai.openai" ,
1062+ "sentry.source" : "custom" ,
10431063 "thread.id" : mock .ANY ,
10441064 "thread.name" : mock .ANY ,
10451065 }
@@ -1120,6 +1140,10 @@ async def test_ai_client_span_responses_async_api(sentry_init, capture_events):
11201140 "gen_ai.usage.output_tokens.reasoning" : 8 ,
11211141 "gen_ai.usage.total_tokens" : 30 ,
11221142 "gen_ai.response.text" : '[{"id": "message-id", "content": [{"annotations": [], "text": "the model response", "type": "output_text"}], "role": "assistant", "status": "completed", "type": "message"}]' ,
1143+ "sentry.name" : "responses gpt-4o" ,
1144+ "sentry.op" : "gen_ai.responses" ,
1145+ "sentry.origin" : "auto.ai.openai" ,
1146+ "sentry.source" : "custom" ,
11231147 "thread.id" : mock .ANY ,
11241148 "thread.name" : mock .ANY ,
11251149 }
@@ -1167,6 +1191,10 @@ async def test_ai_client_span_streaming_responses_async_api(
11671191 "gen_ai.usage.output_tokens.reasoning" : 8 ,
11681192 "gen_ai.usage.total_tokens" : 30 ,
11691193 "gen_ai.response.text" : '[{"id": "message-id", "content": [{"annotations": [], "text": "the model response", "type": "output_text"}], "role": "assistant", "status": "completed", "type": "message"}]' ,
1194+ "sentry.name" : "responses gpt-4o" ,
1195+ "sentry.op" : "gen_ai.responses" ,
1196+ "sentry.origin" : "auto.ai.openai" ,
1197+ "sentry.source" : "custom" ,
11701198 "thread.id" : mock .ANY ,
11711199 "thread.name" : mock .ANY ,
11721200 }