5 | 5 | from openai.types.chat.chat_completion import Choice |
6 | 6 | from openai.types.chat.chat_completion_chunk import ChoiceDelta, Choice as DeltaChoice |
7 | 7 | from openai.types.create_embedding_response import Usage as EmbeddingTokenUsage |
| 8 | +from openai.types.responses.response_usage import ( |
| 9 | + InputTokensDetails, |
| 10 | + OutputTokensDetails, |
| 11 | +) |
| 12 | +from openai.types.responses import ( |
| 13 | + Response, |
| 14 | + ResponseUsage, |
| 15 | + ResponseOutputMessage, |
| 16 | + ResponseOutputText, |
| 17 | +) |
8 | 18 |
9 | 19 | from sentry_sdk import start_transaction |
10 | 20 | from sentry_sdk.consts import SPANDATA |
@@ -46,6 +56,43 @@ async def __call__(self, *args, **kwargs): |
46 | 56 | ) |
47 | 57 |
48 | 58 |
| 59 | +EXAMPLE_RESPONSE = Response( |
| 60 | + id="chat-id", |
| 61 | + output=[ |
| 62 | + ResponseOutputMessage( |
| 63 | + id="message-id", |
| 64 | + content=[ |
| 65 | + ResponseOutputText( |
| 66 | + annotations=[], |
| 67 | + text="the model response", |
| 68 | + type="output_text", |
| 69 | + ), |
| 70 | + ], |
| 71 | + role="assistant", |
| 72 | + status="completed", |
| 73 | + type="message", |
| 74 | + ), |
| 75 | + ], |
| 76 | + parallel_tool_calls=False, |
| 77 | + tool_choice="none", |
| 78 | + tools=[], |
| 79 | + created_at=10000000, |
| 80 | + model="model-id", |
| 81 | + object="response", |
| 82 | + usage=ResponseUsage( |
| 83 | + input_tokens=20, |
| 84 | + input_tokens_details=InputTokensDetails( |
| 85 | + cached_tokens=5, |
| 86 | + ), |
| 87 | + output_tokens=10, |
| 88 | + output_tokens_details=OutputTokensDetails( |
| 89 | + reasoning_tokens=8, |
| 90 | + ), |
| 91 | + total_tokens=30, |
| 92 | + ), |
| 93 | +) |
| 94 | + |
| 95 | + |
49 | 96 | async def async_iterator(values): |
50 | 97 | for value in values: |
51 | 98 | yield value |
@@ -897,3 +944,79 @@ def count_tokens(msg): |
897 | 944 | output_tokens_reasoning=None, |
898 | 945 | total_tokens=None, |
899 | 946 | ) |
| 947 | + |
| 948 | + |
| 949 | +def test_ai_client_span_responses_api_no_pii(sentry_init, capture_events): |
| 950 | + sentry_init( |
| 951 | + integrations=[OpenAIIntegration()], |
| 952 | + traces_sample_rate=1.0, |
| 953 | + ) |
| 954 | + events = capture_events() |
| 955 | + |
| 956 | + client = OpenAI(api_key="z") |
| 957 | + client.responses._post = mock.Mock(return_value=EXAMPLE_RESPONSE) |
| 958 | + |
| 959 | + with start_transaction(name="openai tx"): |
| 960 | + client.responses.create( |
| 961 | + model="gpt-4o", |
| 962 | + instructions="You are a coding assistant that talks like a pirate.", |
| 963 | + input="How do I check if a Python object is an instance of a class?", |
| 964 | + ) |
| 965 | + |
| 966 | + (transaction,) = events |
| 967 | + spans = transaction["spans"] |
| 968 | + |
| 969 | + assert len(spans) == 1 |
| 970 | + assert spans[0]["op"] == "gen_ai.responses" |
| 971 | + assert spans[0]["origin"] == "auto.ai.openai" |
| 972 | + assert spans[0]["data"] == { |
| 973 | + "gen_ai.request.model": "gpt-4o", |
| 974 | + "gen_ai.usage.input_tokens": 20, |
| 975 | + "gen_ai.usage.input_tokens.cached": 5, |
| 976 | + "gen_ai.usage.output_tokens": 10, |
| 977 | + "gen_ai.usage.output_tokens.reasoning": 8, |
| 978 | + "gen_ai.usage.total_tokens": 30, |
| 979 | + "thread.id": mock.ANY, |
| 980 | + "thread.name": mock.ANY, |
| 981 | + } |
| 982 | + |
| 983 | + assert "gen_ai.request.messages" not in spans[0]["data"] |
| 984 | + assert "gen_ai.response.text" not in spans[0]["data"] |
| 985 | + |
| 986 | + |
| 987 | +def test_ai_client_span_responses_api(sentry_init, capture_events): |
| 988 | + sentry_init( |
| 989 | + integrations=[OpenAIIntegration(include_prompts=True)], |
| 990 | + traces_sample_rate=1.0, |
| 991 | + send_default_pii=True, |
| 992 | + ) |
| 993 | + events = capture_events() |
| 994 | + |
| 995 | + client = OpenAI(api_key="z") |
| 996 | + client.responses._post = mock.Mock(return_value=EXAMPLE_RESPONSE) |
| 997 | + |
| 998 | + with start_transaction(name="openai tx"): |
| 999 | + client.responses.create( |
| 1000 | + model="gpt-4o", |
| 1001 | + instructions="You are a coding assistant that talks like a pirate.", |
| 1002 | + input="How do I check if a Python object is an instance of a class?", |
| 1003 | + ) |
| 1004 | + |
| 1005 | + (transaction,) = events |
| 1006 | + spans = transaction["spans"] |
| 1007 | + |
| 1008 | + assert len(spans) == 1 |
| 1009 | + assert spans[0]["op"] == "gen_ai.responses" |
| 1010 | + assert spans[0]["origin"] == "auto.ai.openai" |
| 1011 | + assert spans[0]["data"] == { |
| 1012 | + "gen_ai.request.messages": "How do I check if a Python object is an instance of a class?", |
| 1013 | + "gen_ai.request.model": "gpt-4o", |
| 1014 | + "gen_ai.usage.input_tokens": 20, |
| 1015 | + "gen_ai.usage.input_tokens.cached": 5, |
| 1016 | + "gen_ai.usage.output_tokens": 10, |
| 1017 | + "gen_ai.usage.output_tokens.reasoning": 8, |
| 1018 | + "gen_ai.usage.total_tokens": 30, |
| 1019 | + "gen_ai.response.text": '[{"id": "message-id", "content": [{"annotations": [], "text": "the model response", "type": "output_text"}], "role": "assistant", "status": "completed", "type": "message"}]', |
| 1020 | + "thread.id": mock.ANY, |
| 1021 | + "thread.name": mock.ANY, |
| 1022 | + } |
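
For reference, here is a minimal sketch (not taken from the PR) of how the `ResponseUsage` fields exercised by `EXAMPLE_RESPONSE` could be flattened into the `gen_ai.usage.*` span attributes that both tests assert; the `usage_to_span_data` helper is hypothetical and is not the integration's actual implementation.

```python
from openai.types.responses import ResponseUsage
from openai.types.responses.response_usage import (
    InputTokensDetails,
    OutputTokensDetails,
)


def usage_to_span_data(usage):
    # Hypothetical helper: flatten a ResponseUsage into the span attributes
    # asserted in the tests above (nested *_details fields become dotted keys).
    return {
        "gen_ai.usage.input_tokens": usage.input_tokens,
        "gen_ai.usage.input_tokens.cached": usage.input_tokens_details.cached_tokens,
        "gen_ai.usage.output_tokens": usage.output_tokens,
        "gen_ai.usage.output_tokens.reasoning": usage.output_tokens_details.reasoning_tokens,
        "gen_ai.usage.total_tokens": usage.total_tokens,
    }


# Usage mirrors the values in EXAMPLE_RESPONSE, so the expected dict matches
# the span data asserted in both tests.
usage = ResponseUsage(
    input_tokens=20,
    input_tokens_details=InputTokensDetails(cached_tokens=5),
    output_tokens=10,
    output_tokens_details=OutputTokensDetails(reasoning_tokens=8),
    total_tokens=30,
)
assert usage_to_span_data(usage) == {
    "gen_ai.usage.input_tokens": 20,
    "gen_ai.usage.input_tokens.cached": 5,
    "gen_ai.usage.output_tokens": 10,
    "gen_ai.usage.output_tokens.reasoning": 8,
    "gen_ai.usage.total_tokens": 30,
}
```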