Commit 205cb67: update tests
Parent: 6830d12

1 file changed: tests/test_ai_monitoring.py (9 additions, 61 deletions)

@@ -8,6 +8,7 @@
 from sentry_sdk.ai.monitoring import ai_track
 from sentry_sdk.ai.utils import (
     MAX_GEN_AI_MESSAGE_BYTES,
+    MAX_SINGLE_MESSAGE_CONTENT_CHARS,
     set_data_normalized,
     truncate_and_annotate_messages,
     truncate_messages_by_size,
@@ -177,7 +178,6 @@ async def async_tool(**kwargs):
 
 @pytest.fixture
 def sample_messages():
-    """Sample messages similar to what gen_ai integrations would use"""
     return [
         {"role": "system", "content": "You are a helpful assistant."},
         {
@@ -226,8 +226,7 @@ def test_truncation_removes_oldest_first(self, large_messages):
         )
         assert len(result) < len(large_messages)
 
-        if result:
-            assert result[-1] == large_messages[-1]
+        assert result[-1] == large_messages[-1]
         assert truncation_index == len(large_messages) - len(result)
 
     def test_empty_messages_list(self):
@@ -278,26 +277,22 @@ def test_progressive_truncation(self, large_messages):
             assert current_count >= 1
             prev_count = current_count
 
-    def test_individual_message_truncation(self):
-        large_content = "This is a very long message. " * 1000
+    def test_single_message_truncation(self):
+        large_content = "This is a very long message. " * 10_000
 
         messages = [
             {"role": "system", "content": "You are a helpful assistant."},
             {"role": "user", "content": large_content},
         ]
 
         result, truncation_index = truncate_messages_by_size(
-            messages, max_bytes=MAX_GEN_AI_MESSAGE_BYTES
+            messages, max_single_message_chars=MAX_SINGLE_MESSAGE_CONTENT_CHARS
         )
 
-        assert len(result) > 0
-
-        total_size = len(json.dumps(result, separators=(",", ":")).encode("utf-8"))
-        assert total_size <= MAX_GEN_AI_MESSAGE_BYTES
-
-        for msg in result:
-            msg_size = len(json.dumps(msg, separators=(",", ":")).encode("utf-8"))
-            assert msg_size <= MAX_GEN_AI_MESSAGE_BYTES
+        assert len(result) == 1
+        assert (
+            len(result[0]["content"].rstrip("...")) <= MAX_SINGLE_MESSAGE_CONTENT_CHARS
+        )
 
         # If the last message is too large, the system message is not present
         system_msgs = [m for m in result if m.get("role") == "system"]
@@ -309,53 +304,6 @@ def test_individual_message_truncation(self):
         assert user_msgs[0]["content"].endswith("...")
         assert len(user_msgs[0]["content"]) < len(large_content)
 
-    def test_combined_individual_and_array_truncation(self):
-        huge_content = "X" * 25000
-        medium_content = "Y" * 5000
-
-        messages = [
-            {"role": "system", "content": medium_content},
-            {"role": "user", "content": huge_content},
-            {"role": "assistant", "content": medium_content},
-            {"role": "user", "content": "small"},
-        ]
-
-        result, truncation_index = truncate_messages_by_size(
-            messages, max_bytes=MAX_GEN_AI_MESSAGE_BYTES
-        )
-
-        assert len(result) > 0
-
-        total_size = len(json.dumps(result, separators=(",", ":")).encode("utf-8"))
-        assert total_size <= MAX_GEN_AI_MESSAGE_BYTES
-
-        for msg in result:
-            msg_size = len(json.dumps(msg, separators=(",", ":")).encode("utf-8"))
-            assert msg_size <= MAX_GEN_AI_MESSAGE_BYTES
-
-        # The last user "small" message should always be present and untruncated
-        last_user_msgs = [
-            m for m in result if m.get("role") == "user" and m["content"] == "small"
-        ]
-        assert len(last_user_msgs) == 1
-
-        # If the huge message is present, it must be truncated
-        for user_msg in [
-            m for m in result if m.get("role") == "user" and "X" in m["content"]
-        ]:
-            assert user_msg["content"].endswith("...")
-            assert len(user_msg["content"]) < len(huge_content)
-
-        # The medium messages, if present, should not be truncated
-        for expected_role in ["system", "assistant"]:
-            role_msgs = [m for m in result if m.get("role") == expected_role]
-            if role_msgs:
-                assert role_msgs[0]["content"].startswith("Y")
-                assert len(role_msgs[0]["content"]) <= len(medium_content)
-                assert not role_msgs[0]["content"].endswith("...") or len(
-                    role_msgs[0]["content"]
-                ) == len(medium_content)
-
 
 class TestTruncateAndAnnotateMessages:
     def test_no_truncation_returns_list(self, sample_messages):
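
Note: the updated test drives truncate_messages_by_size through the per-message character cap (max_single_message_chars) instead of the total byte budget (max_bytes). A minimal usage sketch follows; the import path, keyword argument, and (result, truncation_index) return shape are taken from the diff above, and any behavior beyond what the test itself asserts is assumed.

from sentry_sdk.ai.utils import (
    MAX_SINGLE_MESSAGE_CONTENT_CHARS,
    truncate_messages_by_size,
)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "This is a very long message. " * 10_000},
]

# Mirroring the updated test: only the oversized user message survives, its
# content is capped at the per-message character limit and annotated with a
# trailing "...", while truncation_index reflects the dropped leading messages.
result, truncation_index = truncate_messages_by_size(
    messages, max_single_message_chars=MAX_SINGLE_MESSAGE_CONTENT_CHARS
)

assert len(result) == 1
assert result[0]["content"].endswith("...")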
