Commit c80a413

fixed tests
1 parent 1bbb772 commit c80a413

File tree: 2 files changed, +73 -14 lines changed


sentry_sdk/integrations/openai.py

Lines changed: 26 additions & 14 deletions
@@ -155,6 +155,7 @@ def _calculate_token_usage(
         )
 
 
+# TODO: rename to _set_input_data and _set_output_data
 def _set_request_data(span, kwargs, operation, integration):
     # type: (Span, dict[str, Any], str, Integration) -> None
     messages = kwargs.get("messages")
@@ -172,6 +173,7 @@ def _set_request_data(span, kwargs, operation, integration):
     ):
         set_data_normalized(span, SPANDATA.GEN_AI_REQUEST_MESSAGES, messages)
 
+    # TODO: make mapping and loop over kwargs to set attributes
     # Common attributes
     model = kwargs.get("model")
     set_data_normalized(span, SPANDATA.GEN_AI_SYSTEM, "openai")
@@ -215,8 +217,8 @@ def _set_request_data(span, kwargs, operation, integration):
         )
 
 
-def _set_response_data(span, response, kwargs, integration):
-    # type: (Span, Any, dict[str, Any], Integration) -> None
+def _set_response_data(span, response, kwargs, integration, finish_span=True):
+    # type: (Span, Any, dict[str, Any], Integration, bool) -> None
     if hasattr(response, "model"):
         set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_MODEL, response.model)
 
@@ -236,6 +238,7 @@ def _set_response_data(span, response, kwargs, integration):
                     SPANDATA.GEN_AI_RESPONSE_TEXT,
                     safe_serialize(response_text),
                 )
+        span.__exit__(None, None, None)
 
     elif hasattr(response, "output"):
         if should_send_default_pii() and integration.include_prompts:
@@ -318,6 +321,10 @@ async def new_iterator_async():
             response._iterator = new_iterator_async()
         else:
             response._iterator = new_iterator()
+    else:
+        set_data_normalized(span, "unknown_response", True)
+        if finish_span:
+            span.__exit__(None, None, None)
 
     _calculate_token_usage(messages, response, span, None, integration.count_tokens)
 
@@ -341,16 +348,18 @@ def _new_chat_completion_common(f, *args, **kwargs):
     model = kwargs.get("model")
     operation = "chat"
 
-    with sentry_sdk.start_span(
+    span = sentry_sdk.start_span(
         op=consts.OP.GEN_AI_CHAT,
         name=f"{operation} {model}",
         origin=OpenAIIntegration.origin,
-    ) as span:
-        _set_request_data(span, kwargs, operation, integration)
+    )
+    span.__enter__()
 
-        response = yield f, args, kwargs
+    _set_request_data(span, kwargs, operation, integration)
 
-        _set_response_data(span, response, kwargs, integration)
+    response = yield f, args, kwargs
+
+    _set_response_data(span, response, kwargs, integration)
 
     return response
 
@@ -443,7 +452,7 @@ def _new_embeddings_create_common(f, *args, **kwargs):
 
         response = yield f, args, kwargs
 
-        _set_response_data(span, response, kwargs, integration)
+        _set_response_data(span, response, kwargs, integration, finish_span=False)
 
         return response
 
@@ -525,18 +534,21 @@ def _new_responses_create_common(f, *args, **kwargs):
     model = kwargs.get("model")
     operation = "responses"
 
-    with sentry_sdk.start_span(
+    span = sentry_sdk.start_span(
         op=consts.OP.GEN_AI_RESPONSES,
         name=f"{operation} {model}",
        origin=OpenAIIntegration.origin,
-    ) as span:
-        _set_request_data(span, kwargs, operation, integration)
+    )
+    span.__enter__()
+    _set_request_data(span, kwargs, operation, integration)
 
-        response = yield f, args, kwargs
+    response = yield f, args, kwargs
 
-        _set_response_data(span, response, kwargs, integration)
+    _set_response_data(span, response, kwargs, integration)
 
-        return response
+    span.__exit__(None, None, None)
+
+    return response
 
 
 def _wrap_responses_create(f):
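
Note on the pattern above: the chat and responses wrappers no longer scope their span with a `with` block. The span is entered manually via `span.__enter__()`, and closing it is deferred, either to `_set_response_data()` (governed by the new `finish_span` flag) or to the wrapper itself, so a streamed response can keep the span open past the function that created it. Below is a minimal sketch of that idea using only public `sentry_sdk` APIs; `call_with_span` and `make_request` are placeholder names, not code from this commit.

import sentry_sdk

def call_with_span(make_request, finish_span=True):
    # Start the span without a `with` block so its lifetime is not tied to
    # this function's scope.
    span = sentry_sdk.start_span(op="gen_ai.responses", name="responses gpt-4o")
    span.__enter__()  # equivalent to entering `with sentry_sdk.start_span(...) as span:`

    response = make_request()
    span.set_data("gen_ai.system", "openai")

    if finish_span:
        # Non-streaming case: the response is complete, close the span now.
        span.__exit__(None, None, None)
    else:
        # Streaming case: leave the span open; whoever drains the stream must
        # call span.__exit__(None, None, None) once it is exhausted.
        pass
    return response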

tests/integrations/openai/test_openai.py

Lines changed: 47 additions & 0 deletions
@@ -1126,6 +1126,53 @@ async def test_ai_client_span_responses_async_api(sentry_init, capture_events):
     }
 
 
+@pytest.mark.asyncio
+@pytest.mark.skipif(SKIP_RESPONSES_TESTS, reason="Responses API not available")
+async def test_ai_client_span_streaming_responses_async_api(
+    sentry_init, capture_events
+):
+    sentry_init(
+        integrations=[OpenAIIntegration(include_prompts=True)],
+        traces_sample_rate=1.0,
+        send_default_pii=True,
+    )
+    events = capture_events()
+
+    client = AsyncOpenAI(api_key="z")
+    client.responses._post = AsyncMock(return_value=EXAMPLE_RESPONSE)
+
+    with start_transaction(name="openai tx"):
+        await client.responses.create(
+            model="gpt-4o",
+            instructions="You are a coding assistant that talks like a pirate.",
+            input="How do I check if a Python object is an instance of a class?",
+            stream=True,
+        )
+
+    (transaction,) = events
+    spans = transaction["spans"]
+
+    assert len(spans) == 1
+    assert spans[0]["op"] == "gen_ai.responses"
+    assert spans[0]["origin"] == "auto.ai.openai"
+    assert spans[0]["data"] == {
+        "ai.streaming": True,
+        "gen_ai.operation.name": "responses",
+        "gen_ai.request.messages": "How do I check if a Python object is an instance of a class?",
+        "gen_ai.request.model": "gpt-4o",
+        "gen_ai.response.model": "response-model-id",
+        "gen_ai.system": "openai",
+        "gen_ai.usage.input_tokens": 20,
+        "gen_ai.usage.input_tokens.cached": 5,
+        "gen_ai.usage.output_tokens": 10,
+        "gen_ai.usage.output_tokens.reasoning": 8,
+        "gen_ai.usage.total_tokens": 30,
+        "gen_ai.response.text": '[{"id": "message-id", "content": [{"annotations": [], "text": "the model response", "type": "output_text"}], "role": "assistant", "status": "completed", "type": "message"}]',
+        "thread.id": mock.ANY,
+        "thread.name": mock.ANY,
+    }
+
+
 @pytest.mark.asyncio
 @pytest.mark.skipif(SKIP_RESPONSES_TESTS, reason="Responses API not available")
 async def test_error_in_responses_async_api(sentry_init, capture_events):
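
For context, here is a hedged sketch of the application-level call the new streaming test simulates. The test stubs the transport with `AsyncMock(return_value=EXAMPLE_RESPONSE)`, so it never touches the network; the sketch below would perform a real request and therefore needs an OpenAI API key. The model, instructions, input, and transaction name are copied from the test; the init options and the draining loop are illustrative assumptions, not part of this commit.

import asyncio

import sentry_sdk
from openai import AsyncOpenAI
from sentry_sdk.integrations.openai import OpenAIIntegration

async def main():
    # Mirror the test setup: tracing on, PII allowed so prompts are recorded.
    sentry_sdk.init(
        traces_sample_rate=1.0,
        send_default_pii=True,
        integrations=[OpenAIIntegration(include_prompts=True)],
    )
    client = AsyncOpenAI()  # expects OPENAI_API_KEY in the environment

    with sentry_sdk.start_transaction(name="openai tx"):
        stream = await client.responses.create(
            model="gpt-4o",
            instructions="You are a coding assistant that talks like a pirate.",
            input="How do I check if a Python object is an instance of a class?",
            stream=True,
        )
        # Consume the stream as an application normally would; the test above
        # skips this step because the response object is mocked.
        async for _event in stream:
            pass

asyncio.run(main())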
