@@ -404,7 +404,10 @@ def test_bad_chat_completion(sentry_init, capture_events):
         model="some-model", messages=[{"role": "system", "content": "hello"}]
     )
 
-    (event,) = events
+    (
+        _,
+        event,
+    ) = events
     assert event["level"] == "error"
 
 
@@ -422,7 +425,10 @@ async def test_bad_chat_completion_async(sentry_init, capture_events):
         model="some-model", messages=[{"role": "system", "content": "hello"}]
     )
 
-    (event,) = events
+    (
+        _,
+        event,
+    ) = events
     assert event["level"] == "error"
 
 
@@ -545,7 +551,10 @@ def test_embeddings_create_raises_error(
     with pytest.raises(OpenAIError):
         client.embeddings.create(input="hello", model="text-embedding-3-large")
 
-    (event,) = events
+    (
+        _,
+        event,
+    ) = events
     assert event["level"] == "error"
 
 
@@ -574,7 +583,10 @@ async def test_embeddings_create_raises_error_async(
     with pytest.raises(OpenAIError):
         await client.embeddings.create(input="hello", model="text-embedding-3-large")
 
-    (event,) = events
+    (
+        _,
+        event,
+    ) = events
     assert event["level"] == "error"
 
 
@@ -995,6 +1007,10 @@ def test_ai_client_span_responses_api_no_pii(sentry_init, capture_events):
         "gen_ai.usage.output_tokens": 10,
         "gen_ai.usage.output_tokens.reasoning": 8,
         "gen_ai.usage.total_tokens": 30,
+        "sentry.name": "responses gpt-4o",
+        "sentry.op": "gen_ai.responses",
+        "sentry.origin": "auto.ai.openai",
+        "sentry.source": "custom",
         "thread.id": mock.ANY,
         "thread.name": mock.ANY,
     }
@@ -1040,6 +1056,10 @@ def test_ai_client_span_responses_api(sentry_init, capture_events):
         "gen_ai.usage.output_tokens.reasoning": 8,
         "gen_ai.usage.total_tokens": 30,
         "gen_ai.response.text": '[{"id": "message-id", "content": [{"annotations": [], "text": "the model response", "type": "output_text"}], "role": "assistant", "status": "completed", "type": "message"}]',
+        "sentry.name": "responses gpt-4o",
+        "sentry.op": "gen_ai.responses",
+        "sentry.origin": "auto.ai.openai",
+        "sentry.source": "custom",
         "thread.id": mock.ANY,
         "thread.name": mock.ANY,
     }
@@ -1120,6 +1140,10 @@ async def test_ai_client_span_responses_async_api(sentry_init, capture_events):
         "gen_ai.usage.output_tokens.reasoning": 8,
         "gen_ai.usage.total_tokens": 30,
         "gen_ai.response.text": '[{"id": "message-id", "content": [{"annotations": [], "text": "the model response", "type": "output_text"}], "role": "assistant", "status": "completed", "type": "message"}]',
+        "sentry.name": "responses gpt-4o",
+        "sentry.op": "gen_ai.responses",
+        "sentry.origin": "auto.ai.openai",
+        "sentry.source": "custom",
         "thread.id": mock.ANY,
         "thread.name": mock.ANY,
     }
@@ -1167,6 +1191,10 @@ async def test_ai_client_span_streaming_responses_async_api(
         "gen_ai.usage.output_tokens.reasoning": 8,
         "gen_ai.usage.total_tokens": 30,
         "gen_ai.response.text": '[{"id": "message-id", "content": [{"annotations": [], "text": "the model response", "type": "output_text"}], "role": "assistant", "status": "completed", "type": "message"}]',
+        "sentry.name": "responses gpt-4o",
+        "sentry.op": "gen_ai.responses",
+        "sentry.origin": "auto.ai.openai",
+        "sentry.source": "custom",
         "thread.id": mock.ANY,
         "thread.name": mock.ANY,
     }
0 commit comments