@@ -5,6 +5,7 @@
 from cohere import Client, ChatMessage

 from sentry_sdk import start_transaction
+from sentry_sdk.consts import SPANDATA
 from sentry_sdk.integrations.cohere import CohereIntegration

 from unittest import mock  # python 3.3 and above
@@ -53,15 +54,15 @@ def test_nonstreaming_chat(
     assert tx["type"] == "transaction"
     span = tx["spans"][0]
     assert span["op"] == "ai.chat_completions.create.cohere"
-    assert span["data"]["ai.model_id"] == "some-model"
+    assert span["data"][SPANDATA.AI_MODEL_ID] == "some-model"

     if send_default_pii and include_prompts:
-        assert "some context" in span["data"]["ai.input_messages"][0]["content"]
-        assert "hello" in span["data"]["ai.input_messages"][1]["content"]
-        assert "the model response" in span["data"]["ai.responses"]
+        assert "some context" in span["data"][SPANDATA.AI_INPUT_MESSAGES][0]["content"]
+        assert "hello" in span["data"][SPANDATA.AI_INPUT_MESSAGES][1]["content"]
+        assert "the model response" in span["data"][SPANDATA.AI_RESPONSES]
     else:
-        assert "ai.input_messages" not in span["data"]
-        assert "ai.responses" not in span["data"]
+        assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
+        assert SPANDATA.AI_RESPONSES not in span["data"]

     assert span["measurements"]["ai_completion_tokens_used"]["value"] == 10
     assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 20
@@ -124,15 +125,15 @@ def test_streaming_chat(sentry_init, capture_events, send_default_pii, include_p
     assert tx["type"] == "transaction"
     span = tx["spans"][0]
     assert span["op"] == "ai.chat_completions.create.cohere"
-    assert span["data"]["ai.model_id"] == "some-model"
+    assert span["data"][SPANDATA.AI_MODEL_ID] == "some-model"

     if send_default_pii and include_prompts:
-        assert "some context" in span["data"]["ai.input_messages"][0]["content"]
-        assert "hello" in span["data"]["ai.input_messages"][1]["content"]
-        assert "the model response" in span["data"]["ai.responses"]
+        assert "some context" in span["data"][SPANDATA.AI_INPUT_MESSAGES][0]["content"]
+        assert "hello" in span["data"][SPANDATA.AI_INPUT_MESSAGES][1]["content"]
+        assert "the model response" in span["data"][SPANDATA.AI_RESPONSES]
     else:
-        assert "ai.input_messages" not in span["data"]
-        assert "ai.responses" not in span["data"]
+        assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
+        assert SPANDATA.AI_RESPONSES not in span["data"]

     assert span["measurements"]["ai_completion_tokens_used"]["value"] == 10
     assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 20
@@ -194,9 +195,9 @@ def test_embed(sentry_init, capture_events, send_default_pii, include_prompts):
     span = tx["spans"][0]
     assert span["op"] == "ai.embeddings.create.cohere"
     if send_default_pii and include_prompts:
-        assert "hello" in span["data"]["ai.input_messages"]
+        assert "hello" in span["data"][SPANDATA.AI_INPUT_MESSAGES]
     else:
-        assert "ai.input_messages" not in span["data"]
+        assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]

     assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 10
     assert span["measurements"]["ai_total_tokens_used"]["value"] == 10
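A minimal sketch of the assumption behind this change, based on the string keys visible in the removed lines: the SPANDATA constants are expected to resolve to the same "ai.*" keys the tests previously hard-coded, so only the test spelling changes, not the recorded span payload.

    # Sanity check (not part of the diff): assumes SPANDATA exposes the same
    # string values that the old assertions used as literal dict keys.
    from sentry_sdk.consts import SPANDATA

    assert SPANDATA.AI_MODEL_ID == "ai.model_id"
    assert SPANDATA.AI_INPUT_MESSAGES == "ai.input_messages"
    assert SPANDATA.AI_RESPONSES == "ai.responses"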