Commit b4f164e

Make sure to never send objects, but rather strings
1 parent f054a8f commit b4f164e

File tree

2 files changed (+169 additions, -81 deletions)
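At a glance, the change is: span payloads that may contain arbitrary Python objects (including callables) are now flattened to JSON strings by a new safe_serialize() helper before being attached via span.set_data(). A minimal illustration of why payloads containing callables need this flattening (hypothetical callback name; plain json used only to make the point):

    import json

    def on_invoke_tool(ctx, args):  # stand-in for the real tool callback
        return "ok"

    tool_payload = [{"name": "simple_test_tool", "on_invoke_tool": on_invoke_tool}]

    # Plain JSON encoding cannot handle the callable, so the payload has to be
    # converted to strings before it is sent anywhere.
    try:
        json.dumps(tool_payload)
    except TypeError as exc:
        print(exc)  # Object of type function is not JSON serializable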

sentry_sdk/integrations/openai_agents/utils.py

Lines changed: 54 additions & 5 deletions
@@ -1,3 +1,4 @@
+import json
 import sentry_sdk
 from sentry_sdk.consts import SPANDATA
 from sentry_sdk.integrations import DidNotEnable
@@ -76,7 +77,7 @@ def _set_agent_data(span, agent):
     if len(agent.tools) > 0:
         span.set_data(
             SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS,
-            [vars(tool) for tool in agent.tools],
+            safe_serialize([vars(tool) for tool in agent.tools]),
         )


@@ -126,7 +127,7 @@ def _set_input_data(span, get_response_kwargs):
         if len(messages) > 0:
             request_messages.append({"role": role, "content": messages})

-    span.set_data(SPANDATA.GEN_AI_REQUEST_MESSAGES, request_messages)
+    span.set_data(SPANDATA.GEN_AI_REQUEST_MESSAGES, safe_serialize(request_messages))


 def _set_output_data(span, result):
@@ -148,10 +149,58 @@ def _set_output_data(span, result):
                     output_messages["response"].append(output_message.text)
                 except AttributeError:
                     # Unknown output message type, just return the json
-                    output_messages["response"].append(output_message.to_json())
+                    output_messages["response"].append(output_message.dict())

     if len(output_messages["tool"]) > 0:
-        span.set_data(SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, output_messages["tool"])
+        span.set_data(
+            SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, safe_serialize(output_messages["tool"])
+        )

     if len(output_messages["response"]) > 0:
-        span.set_data(SPANDATA.GEN_AI_RESPONSE_TEXT, output_messages["response"])
+        span.set_data(
+            SPANDATA.GEN_AI_RESPONSE_TEXT, safe_serialize(output_messages["response"])
+        )
+
+
+def safe_serialize(data):
+    """Safely serialize to a readable string."""
+
+    def serialize_item(item):
+        if callable(item):
+            try:
+                module = getattr(item, "__module__", None)
+                qualname = getattr(item, "__qualname__", None)
+                name = getattr(item, "__name__", "anonymous")
+
+                if module and qualname:
+                    full_path = f"{module}.{qualname}"
+                elif module and name:
+                    full_path = f"{module}.{name}"
+                else:
+                    full_path = name
+
+                return f"<function {full_path}>"
+            except Exception:
+                return f"<callable {type(item).__name__}>"
+        elif isinstance(item, dict):
+            return {k: serialize_item(v) for k, v in item.items()}
+        elif isinstance(item, (list, tuple)):
+            return [serialize_item(x) for x in item]
+        elif hasattr(item, "__dict__"):
+            try:
+                attrs = {
+                    k: serialize_item(v)
+                    for k, v in vars(item).items()
+                    if not k.startswith("_")
+                }
+                return f"<{type(item).__name__} {attrs}>"
+            except Exception:
+                return repr(item)
+        else:
+            return item
+
+    try:
+        serialized = serialize_item(data)
+        return json.dumps(serialized, default=str)
+    except Exception:
+        return str(data)
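For reference, a small usage sketch of the new helper (illustrative payload; the exact <function ...> path depends on where the callback is defined):

    from sentry_sdk.integrations.openai_agents.utils import safe_serialize

    def on_invoke_tool(ctx, args):  # stand-in for the real tool callback
        return "ok"

    payload = [{"name": "simple_test_tool", "on_invoke_tool": on_invoke_tool}]
    serialized = safe_serialize(payload)

    # serialized is a plain string, e.g. when run as a script:
    # '[{"name": "simple_test_tool", "on_invoke_tool": "<function __main__.on_invoke_tool>"}]'
    assert isinstance(serialized, str)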

tests/integrations/openai_agents/test_openai_agents.py

Lines changed: 115 additions & 76 deletions
@@ -1,9 +1,10 @@
-from unittest import mock
+import re
 import pytest
 from unittest.mock import MagicMock, patch
 import os

 from sentry_sdk.integrations.openai_agents import OpenAIAgentsIntegration
+from sentry_sdk.integrations.openai_agents.utils import safe_serialize

 import agents
 from agents import (
@@ -360,22 +361,24 @@ def simple_test_tool(message: str) -> str:
         ai_client_span2,
     ) = spans

-    available_tools = [
-        {
-            "name": "simple_test_tool",
-            "description": "A simple tool",
-            "params_json_schema": {
-                "properties": {"message": {"title": "Message", "type": "string"}},
-                "required": ["message"],
-                "title": "simple_test_tool_args",
-                "type": "object",
-                "additionalProperties": False,
-            },
-            "on_invoke_tool": mock.ANY,
-            "strict_json_schema": True,
-            "is_enabled": True,
-        }
-    ]
+    available_tools = safe_serialize(
+        [
+            {
+                "name": "simple_test_tool",
+                "description": "A simple tool",
+                "params_json_schema": {
+                    "properties": {"message": {"title": "Message", "type": "string"}},
+                    "required": ["message"],
+                    "title": "simple_test_tool_args",
+                    "type": "object",
+                    "additionalProperties": False,
+                },
+                "on_invoke_tool": "<function agents.tool.function_tool.<locals>._create_function_tool.<locals>._on_invoke_tool>",
+                "strict_json_schema": True,
+                "is_enabled": True,
+            }
+        ]
+    )

     assert transaction["transaction"] == "test_agent workflow"
     assert transaction["contexts"]["trace"]["origin"] == "auto.ai.openai_agents"
@@ -397,16 +400,22 @@ def simple_test_tool(message: str) -> str:
     assert ai_client_span1["data"]["gen_ai.agent.name"] == "test_agent"
     assert ai_client_span1["data"]["gen_ai.request.available_tools"] == available_tools
     assert ai_client_span1["data"]["gen_ai.request.max_tokens"] == 100
-    assert ai_client_span1["data"]["gen_ai.request.messages"] == [
-        {
-            "role": "system",
-            "content": [{"type": "text", "text": "You are a helpful test assistant."}],
-        },
-        {
-            "role": "user",
-            "content": [{"type": "text", "text": "Please use the simple test tool"}],
-        },
-    ]
+    assert ai_client_span1["data"]["gen_ai.request.messages"] == safe_serialize(
+        [
+            {
+                "role": "system",
+                "content": [
+                    {"type": "text", "text": "You are a helpful test assistant."}
+                ],
+            },
+            {
+                "role": "user",
+                "content": [
+                    {"type": "text", "text": "Please use the simple test tool"}
+                ],
+            },
+        ]
+    )
     assert ai_client_span1["data"]["gen_ai.request.model"] == "gpt-4"
     assert ai_client_span1["data"]["gen_ai.request.temperature"] == 0.7
     assert ai_client_span1["data"]["gen_ai.request.top_p"] == 1.0
@@ -415,22 +424,35 @@ def simple_test_tool(message: str) -> str:
     assert ai_client_span1["data"]["gen_ai.usage.output_tokens"] == 5
     assert ai_client_span1["data"]["gen_ai.usage.output_tokens.reasoning"] == 0
     assert ai_client_span1["data"]["gen_ai.usage.total_tokens"] == 15
-    assert ai_client_span1["data"]["gen_ai.response.tool_calls"] == [
-        {
-            "arguments": '{"message": "hello"}',
-            "call_id": "call_123",
-            "name": "simple_test_tool",
-            "type": "function_call",
-            "id": "call_123",
-            "status": None,
-            "function": mock.ANY,
-        }
-    ]
+    assert re.sub(
+        r"SerializationIterator\(.*\)",
+        "NOT_CHECKED",
+        ai_client_span1["data"]["gen_ai.response.tool_calls"],
+    ) == safe_serialize(
+        [
+            {
+                "arguments": '{"message": "hello"}',
+                "call_id": "call_123",
+                "name": "simple_test_tool",
+                "type": "function_call",
+                "id": "call_123",
+                "status": None,
+                "function": "NOT_CHECKED",
+            }
+        ]
+    )

     assert tool_span["description"] == "execute_tool simple_test_tool"
     assert tool_span["data"]["gen_ai.agent.name"] == "test_agent"
     assert tool_span["data"]["gen_ai.operation.name"] == "execute_tool"
-    assert tool_span["data"]["gen_ai.request.available_tools"] == available_tools
+    assert (
+        re.sub(
+            "<.*>(,)",
+            r"'NOT_CHECKED'\1",
+            agent_span["data"]["gen_ai.request.available_tools"],
+        )
+        == available_tools
+    )
     assert tool_span["data"]["gen_ai.request.max_tokens"] == 100
     assert tool_span["data"]["gen_ai.request.model"] == "gpt-4"
     assert tool_span["data"]["gen_ai.request.temperature"] == 0.7
@@ -445,47 +467,64 @@ def simple_test_tool(message: str) -> str:
     assert ai_client_span2["description"] == "chat gpt-4"
     assert ai_client_span2["data"]["gen_ai.agent.name"] == "test_agent"
     assert ai_client_span2["data"]["gen_ai.operation.name"] == "chat"
-    assert ai_client_span2["data"]["gen_ai.request.available_tools"] == available_tools
+    assert (
+        re.sub(
+            "<.*>(,)",
+            r"'NOT_CHECKED'\1",
+            agent_span["data"]["gen_ai.request.available_tools"],
+        )
+        == available_tools
+    )
     assert ai_client_span2["data"]["gen_ai.request.max_tokens"] == 100
-    assert ai_client_span2["data"]["gen_ai.request.messages"] == [
-        {
-            "role": "system",
-            "content": [{"type": "text", "text": "You are a helpful test assistant."}],
-        },
-        {
-            "role": "user",
-            "content": [{"type": "text", "text": "Please use the simple test tool"}],
-        },
-        {
-            "role": "assistant",
-            "content": [
-                {
-                    "arguments": '{"message": "hello"}',
-                    "call_id": "call_123",
-                    "name": "simple_test_tool",
-                    "type": "function_call",
-                    "id": "call_123",
-                    "function": mock.ANY,
-                }
-            ],
-        },
-        {
-            "role": "tool",
-            "content": [
-                {
-                    "call_id": "call_123",
-                    "output": "Tool executed with: hello",
-                    "type": "function_call_output",
-                }
-            ],
-        },
-    ]
+    assert re.sub(
+        r"SerializationIterator\(.*\)",
+        "NOT_CHECKED",
+        ai_client_span2["data"]["gen_ai.request.messages"],
+    ) == safe_serialize(
+        [
+            {
+                "role": "system",
+                "content": [
+                    {"type": "text", "text": "You are a helpful test assistant."}
+                ],
+            },
+            {
+                "role": "user",
+                "content": [
+                    {"type": "text", "text": "Please use the simple test tool"}
+                ],
+            },
+            {
+                "role": "assistant",
+                "content": [
+                    {
+                        "arguments": '{"message": "hello"}',
+                        "call_id": "call_123",
+                        "name": "simple_test_tool",
+                        "type": "function_call",
+                        "id": "call_123",
+                        "function": "NOT_CHECKED",
+                    }
+                ],
+            },
+            {
+                "role": "tool",
+                "content": [
                    {
                        "call_id": "call_123",
                        "output": "Tool executed with: hello",
                        "type": "function_call_output",
                    }
                ],
            },
        ]
    )
     assert ai_client_span2["data"]["gen_ai.request.model"] == "gpt-4"
     assert ai_client_span2["data"]["gen_ai.request.temperature"] == 0.7
     assert ai_client_span2["data"]["gen_ai.request.top_p"] == 1.0
-    assert ai_client_span2["data"]["gen_ai.response.text"] == [
-        "Task completed using the tool"
-    ]
+    assert ai_client_span2["data"]["gen_ai.response.text"] == safe_serialize(
+        ["Task completed using the tool"]
+    )
     assert ai_client_span2["data"]["gen_ai.system"] == "openai"
     assert ai_client_span2["data"]["gen_ai.usage.input_tokens.cached"] == 0
     assert ai_client_span2["data"]["gen_ai.usage.input_tokens"] == 15
