+import json
 import pytest
 from openai import AsyncOpenAI, OpenAI, AsyncStream, Stream, OpenAIError
 from openai.types import CompletionUsage, CreateEmbeddingResponse, Embedding
@@ -52,7 +53,7 @@ async def __call__(self, *args, **kwargs):
         )
     ],
     created=10000000,
-    model="model-id",
+    model="response-model-id",
     object="chat.completion",
     usage=CompletionUsage(
         completion_tokens=10,
@@ -86,7 +87,7 @@ async def __call__(self, *args, **kwargs):
     tool_choice="none",
     tools=[],
     created_at=10000000,
-    model="model-id",
+    model="response-model-id",
     object="response",
     usage=ResponseUsage(
         input_tokens=20,
@@ -143,7 +144,7 @@ def test_nonstreaming_chat_completion(
         assert "hello" in span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]["content"]
         assert (
             "the model response"
-            in span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT]["content"]
+            in json.loads(span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT])[0]["content"]
         )
     else:
         assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"]
@@ -188,7 +189,7 @@ async def test_nonstreaming_chat_completion_async(
         assert "hello" in span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]["content"]
         assert (
             "the model response"
-            in span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT]["content"]
+            in json.loads(span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT])[0]["content"]
         )
     else:
         assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"]
@@ -986,7 +987,10 @@ def test_ai_client_span_responses_api_no_pii(sentry_init, capture_events):
     assert spans[0]["op"] == "gen_ai.responses"
     assert spans[0]["origin"] == "auto.ai.openai"
     assert spans[0]["data"] == {
+        "gen_ai.operation.name": "responses",
         "gen_ai.request.model": "gpt-4o",
+        "gen_ai.response.model": "response-model-id",
+        "gen_ai.system": "openai",
         "gen_ai.usage.input_tokens": 20,
         "gen_ai.usage.input_tokens.cached": 5,
         "gen_ai.usage.output_tokens": 10,
@@ -1026,8 +1030,11 @@ def test_ai_client_span_responses_api(sentry_init, capture_events):
     assert spans[0]["op"] == "gen_ai.responses"
     assert spans[0]["origin"] == "auto.ai.openai"
     assert spans[0]["data"] == {
+        "gen_ai.operation.name": "responses",
         "gen_ai.request.messages": "How do I check if a Python object is an instance of a class?",
         "gen_ai.request.model": "gpt-4o",
+        "gen_ai.system": "openai",
+        "gen_ai.response.model": "response-model-id",
         "gen_ai.usage.input_tokens": 20,
         "gen_ai.usage.input_tokens.cached": 5,
         "gen_ai.usage.output_tokens": 10,
@@ -1103,8 +1110,11 @@ async def test_ai_client_span_responses_async_api(sentry_init, capture_events):
     assert spans[0]["op"] == "gen_ai.responses"
     assert spans[0]["origin"] == "auto.ai.openai"
     assert spans[0]["data"] == {
+        "gen_ai.operation.name": "responses",
         "gen_ai.request.messages": "How do I check if a Python object is an instance of a class?",
         "gen_ai.request.model": "gpt-4o",
+        "gen_ai.response.model": "response-model-id",
+        "gen_ai.system": "openai",
         "gen_ai.usage.input_tokens": 20,
         "gen_ai.usage.input_tokens.cached": 5,
         "gen_ai.usage.output_tokens": 10,