Commit b3f172b

Update tests
1 parent 4dad290 commit b3f172b

File tree: 2 files changed (+37 / -9 lines)

sentry_sdk/ai/monitoring.py

Lines changed: 5 additions & 5 deletions
@@ -38,7 +38,7 @@ def sync_wrapped(*args: Any, **kwargs: Any) -> Any:
                 for k, v in kwargs.pop("sentry_data", {}).items():
                     span.set_attribute(k, v)
                 if curr_pipeline:
-                    span.set_data(SPANDATA.GEN_AI_PIPELINE_NAME, curr_pipeline)
+                    span.set_attribute(SPANDATA.GEN_AI_PIPELINE_NAME, curr_pipeline)
                     return f(*args, **kwargs)
                 else:
                     _ai_pipeline_name.set(description)
@@ -68,7 +68,7 @@ async def async_wrapped(*args: Any, **kwargs: Any) -> Any:
                 for k, v in kwargs.pop("sentry_data", {}).items():
                     span.set_attribute(k, v)
                 if curr_pipeline:
-                    span.set_data(SPANDATA.GEN_AI_PIPELINE_NAME, curr_pipeline)
+                    span.set_attribute(SPANDATA.GEN_AI_PIPELINE_NAME, curr_pipeline)
                     return await f(*args, **kwargs)
                 else:
                     _ai_pipeline_name.set(description)
@@ -105,7 +105,7 @@ def record_token_usage(
     # TODO: move pipeline name elsewhere
     ai_pipeline_name = get_ai_pipeline_name()
     if ai_pipeline_name:
-        span.set_data(SPANDATA.GEN_AI_PIPELINE_NAME, ai_pipeline_name)
+        span.set_attribute(SPANDATA.GEN_AI_PIPELINE_NAME, ai_pipeline_name)

     if input_tokens is not None:
         span.set_attribute(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, input_tokens)
@@ -117,10 +117,10 @@ def record_token_usage(
         )

     if output_tokens is not None:
-        span.set_data(SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS, output_tokens)
+        span.set_attribute(SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS, output_tokens)

     if output_tokens_reasoning is not None:
-        span.set_data(
+        span.set_attribute(
             SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING,
             output_tokens_reasoning,
         )
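
A minimal usage sketch of the decorator backed by the sync_wrapped/async_wrapped wrappers above (the function name and the attribute key are illustrative; the reserved "sentry_data" keyword argument is the one popped in the wrapper):

    from sentry_sdk.ai.monitoring import ai_track

    @ai_track("my-ai-pipeline")
    def my_pipeline(prompt):
        # Model / tool calls made in here are grouped under the pipeline span.
        return "model output"

    # Extra span attributes can be passed via the reserved "sentry_data" kwarg,
    # which the wrapper pops and applies with span.set_attribute().
    my_pipeline("hello", sentry_data={"gen_ai.request.model": "gpt-4o"})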

tests/integrations/openai/test_openai.py

Lines changed: 32 additions & 4 deletions
@@ -404,7 +404,10 @@ def test_bad_chat_completion(sentry_init, capture_events):
             model="some-model", messages=[{"role": "system", "content": "hello"}]
         )

-    (event,) = events
+    (
+        _,
+        event,
+    ) = events
     assert event["level"] == "error"


@@ -422,7 +425,10 @@ async def test_bad_chat_completion_async(sentry_init, capture_events):
             model="some-model", messages=[{"role": "system", "content": "hello"}]
         )

-    (event,) = events
+    (
+        _,
+        event,
+    ) = events
     assert event["level"] == "error"


@@ -545,7 +551,10 @@ def test_embeddings_create_raises_error(
     with pytest.raises(OpenAIError):
         client.embeddings.create(input="hello", model="text-embedding-3-large")

-    (event,) = events
+    (
+        _,
+        event,
+    ) = events
     assert event["level"] == "error"


@@ -574,7 +583,10 @@ async def test_embeddings_create_raises_error_async(
     with pytest.raises(OpenAIError):
         await client.embeddings.create(input="hello", model="text-embedding-3-large")

-    (event,) = events
+    (
+        _,
+        event,
+    ) = events
     assert event["level"] == "error"


@@ -995,6 +1007,10 @@ def test_ai_client_span_responses_api_no_pii(sentry_init, capture_events):
         "gen_ai.usage.output_tokens": 10,
         "gen_ai.usage.output_tokens.reasoning": 8,
         "gen_ai.usage.total_tokens": 30,
+        "sentry.name": "responses gpt-4o",
+        "sentry.op": "gen_ai.responses",
+        "sentry.origin": "auto.ai.openai",
+        "sentry.source": "custom",
         "thread.id": mock.ANY,
         "thread.name": mock.ANY,
     }
@@ -1040,6 +1056,10 @@ def test_ai_client_span_responses_api(sentry_init, capture_events):
         "gen_ai.usage.output_tokens.reasoning": 8,
         "gen_ai.usage.total_tokens": 30,
         "gen_ai.response.text": '[{"id": "message-id", "content": [{"annotations": [], "text": "the model response", "type": "output_text"}], "role": "assistant", "status": "completed", "type": "message"}]',
+        "sentry.name": "responses gpt-4o",
+        "sentry.op": "gen_ai.responses",
+        "sentry.origin": "auto.ai.openai",
+        "sentry.source": "custom",
         "thread.id": mock.ANY,
         "thread.name": mock.ANY,
     }
@@ -1120,6 +1140,10 @@ async def test_ai_client_span_responses_async_api(sentry_init, capture_events):
         "gen_ai.usage.output_tokens.reasoning": 8,
         "gen_ai.usage.total_tokens": 30,
         "gen_ai.response.text": '[{"id": "message-id", "content": [{"annotations": [], "text": "the model response", "type": "output_text"}], "role": "assistant", "status": "completed", "type": "message"}]',
+        "sentry.name": "responses gpt-4o",
+        "sentry.op": "gen_ai.responses",
+        "sentry.origin": "auto.ai.openai",
+        "sentry.source": "custom",
         "thread.id": mock.ANY,
         "thread.name": mock.ANY,
     }
@@ -1167,6 +1191,10 @@ async def test_ai_client_span_streaming_responses_async_api(
         "gen_ai.usage.output_tokens.reasoning": 8,
         "gen_ai.usage.total_tokens": 30,
         "gen_ai.response.text": '[{"id": "message-id", "content": [{"annotations": [], "text": "the model response", "type": "output_text"}], "role": "assistant", "status": "completed", "type": "message"}]',
+        "sentry.name": "responses gpt-4o",
+        "sentry.op": "gen_ai.responses",
+        "sentry.origin": "auto.ai.openai",
+        "sentry.source": "custom",
         "thread.id": mock.ANY,
         "thread.name": mock.ANY,
     }
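
The unpacking change suggests these error-path tests now capture two events instead of one, presumably a transaction from the instrumented call in addition to the error event, which stays last. A sketch of the pattern, with the failing call stubbed out as a hypothetical helper:

    import pytest

    def test_error_event_is_second(sentry_init, capture_events):
        sentry_init(traces_sample_rate=1.0)  # tracing on, so a transaction is captured too
        events = capture_events()

        with pytest.raises(Exception):
            trigger_failing_openai_call()  # hypothetical stand-in for the failing client call

        # Two events are captured; the error event comes second, as in the diff above.
        (
            _,
            event,
        ) = events
        assert event["level"] == "error"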
