
Commit 7f335f9

fix(integrations): cleanups and working in feedback
1 parent: a94870e

8 files changed (+218, -247 lines)


sentry_sdk/integrations/pydantic_ai/__init__.py

Lines changed: 0 additions & 1 deletion

@@ -9,7 +9,6 @@
 
 try:
     import pydantic_ai
-
 except ImportError:
     raise DidNotEnable("pydantic-ai not installed")
 

sentry_sdk/integrations/pydantic_ai/patches/agent_run.py

Lines changed: 15 additions & 8 deletions

@@ -1,21 +1,28 @@
 from functools import wraps
 
 import sentry_sdk
-from sentry_sdk.integrations import DidNotEnable
+from sentry_sdk.tracing_utils import set_span_errored
+from sentry_sdk.utils import event_from_exception
 
-from ..spans import agent_workflow_span, invoke_agent_span, update_invoke_agent_span
-from ..utils import _capture_exception
+from ..spans import agent_workflow_span
 
 from typing import TYPE_CHECKING
+from pydantic_ai.agent import Agent
 
 if TYPE_CHECKING:
     from typing import Any, Callable, Optional
 
-try:
-    import pydantic_ai
-    from pydantic_ai.agent import Agent
-except ImportError:
-    raise DidNotEnable("pydantic-ai not installed")
+
+def _capture_exception(exc):
+    # type: (Any) -> None
+    set_span_errored()
+
+    event, hint = event_from_exception(
+        exc,
+        client_options=sentry_sdk.get_client().options,
+        mechanism={"type": "pydantic_ai", "handled": False},
+    )
+    sentry_sdk.capture_event(event, hint=hint)
 
 
 class _StreamingContextManagerWrapper:
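
For context, the relocated _capture_exception helper marks the active span as errored and then builds and sends an error event. A minimal usage sketch, assuming a hypothetical wrapped_run coroutine (the wrapper name and call shape are illustrative, not part of this commit):

async def wrapped_run(original_run, *args, **kwargs):
    # type: (Callable[..., Any], *Any, **Any) -> Any
    # Hypothetical wrapper: report the failure to Sentry, then re-raise so
    # pydantic-ai's own error handling still sees the exception.
    try:
        return await original_run(*args, **kwargs)
    except Exception as exc:
        _capture_exception(exc)
        raise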

sentry_sdk/integrations/pydantic_ai/patches/graph_nodes.py

Lines changed: 1 addition & 8 deletions

@@ -1,25 +1,18 @@
 from functools import wraps
 
-from sentry_sdk.integrations import DidNotEnable
-
 from ..spans import (
     invoke_agent_span,
     update_invoke_agent_span,
     ai_client_span,
     update_ai_client_span,
 )
+from pydantic_ai._agent_graph import UserPromptNode, ModelRequestNode, CallToolsNode
 
 from typing import TYPE_CHECKING
 
 if TYPE_CHECKING:
     from typing import Any, Callable
 
-try:
-    import pydantic_ai
-    from pydantic_ai._agent_graph import UserPromptNode, ModelRequestNode, CallToolsNode
-except ImportError:
-    raise DidNotEnable("pydantic-ai not installed")
-
 
 def _patch_graph_nodes():
     # type: () -> None

sentry_sdk/integrations/pydantic_ai/patches/model_request.py

Lines changed: 3 additions & 9 deletions

@@ -1,18 +1,13 @@
 from functools import wraps
+from typing import TYPE_CHECKING
 
-from sentry_sdk.integrations import DidNotEnable
+from pydantic_ai import models
 
 from ..spans import ai_client_span, update_ai_client_span
 
-from typing import TYPE_CHECKING
 
 if TYPE_CHECKING:
-    from typing import Any, Callable
-
-try:
-    import pydantic_ai
-except ImportError:
-    raise DidNotEnable("pydantic-ai not installed")
+    from typing import Any
 
 
 def _patch_model_request():
@@ -23,7 +18,6 @@ def _patch_model_request():
     In pydantic-ai, model requests are handled through the Model interface.
     We need to patch the request method on models to create spans.
     """
-    from pydantic_ai import models
 
     # Patch the base Model class's request method
     if hasattr(models, "Model"):
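
The docstring describes patching Model.request, and the hunk stops at the hasattr(models, "Model") guard. A rough sketch of what such a patch typically looks like, assuming ai_client_span can be used as a context manager and that update_ai_client_span(span, response) accepts the raw response (both are assumptions, not confirmed by this diff):

def _sketch_patch_model_request():
    # type: () -> None
    # Illustrative only: wrap the base Model.request so every model call
    # is recorded in a gen_ai client span.
    original_request = models.Model.request

    @wraps(original_request)
    async def wrapped_request(self, *args, **kwargs):
        # type: (Any, *Any, **Any) -> Any
        messages = args[0] if args else kwargs.get("messages")
        with ai_client_span(messages, None, self, None) as span:
            response = await original_request(self, *args, **kwargs)
            update_ai_client_span(span, response)  # assumed signature
            return response

    models.Model.request = wrapped_request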

sentry_sdk/integrations/pydantic_ai/patches/tools.py

Lines changed: 4 additions & 9 deletions

@@ -1,6 +1,9 @@
 from functools import wraps
 
-from sentry_sdk.integrations import DidNotEnable
+from pydantic_ai.toolsets.abstract import AbstractToolset
+from pydantic_ai.toolsets.function import FunctionToolset
+
+import sentry_sdk
 
 from ..spans import execute_tool_span, update_execute_tool_span
 
@@ -9,11 +12,6 @@
 if TYPE_CHECKING:
     from typing import Any, Callable
 
-try:
-    import pydantic_ai
-except ImportError:
-    raise DidNotEnable("pydantic-ai not installed")
-
 
 def _patch_tool_execution():
     # type: () -> None
@@ -26,8 +24,6 @@ def _patch_tool_execution():
     Note: pydantic-ai has built-in OpenTelemetry instrumentation for tools.
     Our patching adds Sentry-specific span data on top of that.
     """
-    from pydantic_ai.toolsets.abstract import AbstractToolset
-    from pydantic_ai.toolsets.function import FunctionToolset
 
     def create_wrapped_call_tool(original_call_tool):
         # type: (Callable[..., Any]) -> Callable[..., Any]
@@ -37,7 +33,6 @@ def create_wrapped_call_tool(original_call_tool):
         async def wrapped_call_tool(self, name, args_dict, ctx, tool):
             # type: (Any, str, Any, Any, Any) -> Any
             # Always create span if we're in a Sentry transaction context
-            import sentry_sdk
 
             current_span = sentry_sdk.get_current_span()
             should_create_span = current_span is not None
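
A wrapper factory like create_wrapped_call_tool is only useful once it is installed on pydantic-ai's toolset classes. A minimal sketch of that final step as it might appear at the end of _patch_tool_execution() (which attributes are actually patched is an assumption, not shown in these hunks):

    # Illustrative only: install the wrapped coroutine on the toolsets that
    # are now imported at module level in the first hunk above.
    if hasattr(FunctionToolset, "call_tool"):
        FunctionToolset.call_tool = create_wrapped_call_tool(FunctionToolset.call_tool)
    if hasattr(AbstractToolset, "call_tool"):
        AbstractToolset.call_tool = create_wrapped_call_tool(AbstractToolset.call_tool)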

sentry_sdk/integrations/pydantic_ai/spans/ai_client.py

Lines changed: 158 additions & 6 deletions

@@ -1,14 +1,14 @@
 import sentry_sdk
+from sentry_sdk.ai.utils import set_data_normalized
 from sentry_sdk.consts import OP, SPANDATA
+from sentry_sdk.utils import safe_serialize
 
 from ..consts import SPAN_ORIGIN
 from ..utils import (
-    _get_model_name,
     _set_agent_data,
     _set_model_data,
-    _set_usage_data,
-    _set_input_messages,
-    _set_output_data,
+    _should_send_prompts,
+    _get_model_name,
 )
 
 from typing import TYPE_CHECKING
@@ -17,6 +17,158 @@
     from typing import Any
 
 
+def _set_usage_data(span, usage):
+    # type: (sentry_sdk.tracing.Span, RequestUsage) -> None
+    """Set token usage data on a span."""
+    if usage is None:
+        return
+
+    if hasattr(usage, "input_tokens") and usage.input_tokens is not None:
+        span.set_data(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, usage.input_tokens)
+
+    if hasattr(usage, "output_tokens") and usage.output_tokens is not None:
+        span.set_data(SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS, usage.output_tokens)
+
+    if hasattr(usage, "total_tokens") and usage.total_tokens is not None:
+        span.set_data(SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS, usage.total_tokens)
+
+
+def _set_input_messages(span, messages):
+    # type: (sentry_sdk.tracing.Span, Any) -> None
+    """Set input messages data on a span."""
+    if not _should_send_prompts():
+        return
+
+    if not messages:
+        return
+
+    try:
+        formatted_messages = []
+        system_prompt = None
+
+        # Extract system prompt from any ModelRequest with instructions
+        for msg in messages:
+            if hasattr(msg, "instructions") and msg.instructions:
+                system_prompt = msg.instructions
+                break
+
+        # Add system prompt as first message if present
+        if system_prompt:
+            formatted_messages.append(
+                {"role": "system", "content": [{"type": "text", "text": system_prompt}]}
+            )
+
+        for msg in messages:
+            if hasattr(msg, "parts"):
+                for part in msg.parts:
+                    role = "user"
+                    if hasattr(part, "__class__"):
+                        if "System" in part.__class__.__name__:
+                            role = "system"
+                        elif (
+                            "Assistant" in part.__class__.__name__
+                            or "Text" in part.__class__.__name__
+                            or "ToolCall" in part.__class__.__name__
+                        ):
+                            role = "assistant"
+                        elif "ToolReturn" in part.__class__.__name__:
+                            role = "tool"
+
+                    content = []  # type: List[Dict[str, Any] | str]
+                    tool_calls = None
+                    tool_call_id = None
+
+                    # Handle ToolCallPart (assistant requesting tool use)
+                    if "ToolCall" in part.__class__.__name__:
+                        tool_call_data = {}
+                        if hasattr(part, "tool_name"):
+                            tool_call_data["name"] = part.tool_name
+                        if hasattr(part, "args"):
+                            tool_call_data["arguments"] = safe_serialize(part.args)
+                        if tool_call_data:
+                            tool_calls = [tool_call_data]
+                    # Handle ToolReturnPart (tool result)
+                    elif "ToolReturn" in part.__class__.__name__:
+                        if hasattr(part, "tool_name"):
+                            tool_call_id = part.tool_name
+                        if hasattr(part, "content"):
+                            content.append({"type": "text", "text": str(part.content)})
+                    # Handle regular content
+                    elif hasattr(part, "content"):
+                        if isinstance(part.content, str):
+                            content.append({"type": "text", "text": part.content})
+                        elif isinstance(part.content, list):
+                            for item in part.content:
+                                if isinstance(item, str):
+                                    content.append({"type": "text", "text": item})
+                                else:
+                                    content.append(safe_serialize(item))
+                        else:
+                            content.append({"type": "text", "text": str(part.content)})
+
+                    # Add message if we have content or tool calls
+                    if content or tool_calls:
+                        message = {"role": role}  # type: Dict[str, Any]
+                        if content:
+                            message["content"] = content
+                        if tool_calls:
+                            message["tool_calls"] = tool_calls
+                        if tool_call_id:
+                            message["tool_call_id"] = tool_call_id
+                        formatted_messages.append(message)
+
+        if formatted_messages:
+            set_data_normalized(
+                span, SPANDATA.GEN_AI_REQUEST_MESSAGES, formatted_messages, unpack=False
+            )
+    except Exception:
+        # If we fail to format messages, just skip it
+        pass
+
+
+def _set_output_data(span, response):
+    # type: (sentry_sdk.tracing.Span, Any) -> None
+    """Set output data on a span."""
+    if not _should_send_prompts():
+        return
+
+    if not response:
+        return
+
+    set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_MODEL, response.model_name)
+    try:
+        # Extract text from ModelResponse
+        if hasattr(response, "parts"):
+            texts = []
+            tool_calls = []
+
+            for part in response.parts:
+                if hasattr(part, "__class__"):
+                    if "Text" in part.__class__.__name__ and hasattr(part, "content"):
+                        texts.append(part.content)
+                    elif "ToolCall" in part.__class__.__name__:
+                        tool_call_data = {
+                            "type": "function",
+                        }
+                        if hasattr(part, "tool_name"):
+                            tool_call_data["name"] = part.tool_name
+                        if hasattr(part, "args"):
+                            tool_call_data["arguments"] = safe_serialize(part.args)
+                        tool_calls.append(tool_call_data)
+
+            if texts:
+                set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, texts)
+
+            if tool_calls:
+                span.set_data(
+                    SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, safe_serialize(tool_calls)
+                )
+
+    except Exception:
+        # If we fail to format output, just skip it
+        pass
+
+
 def ai_client_span(messages, agent, model, model_settings):
     # type: (Any, Any, Any, Any) -> sentry_sdk.tracing.Span
     """Create a span for an AI client call (model request).
@@ -72,11 +224,11 @@ def ai_client_span(messages, agent, model, model_settings):
         # Add description from function_schema if available
         if hasattr(tool, "function_schema"):
             schema = tool.function_schema
-            if hasattr(schema, "description") and schema.description:
+            if getattr(schema, "description", None):
                 tool_info["description"] = schema.description
 
             # Add parameters from json_schema
-            if hasattr(schema, "json_schema") and schema.json_schema:
+            if getattr(schema, "json_schema", None):
                 tool_info["parameters"] = schema.json_schema
 
         tools.append(tool_info)
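
Taken together, the three helpers moved into this module annotate a client span once a model call completes. A small sketch of how they would typically be combined (the combining function is hypothetical glue, not part of this diff; the response shape follows pydantic-ai's ModelResponse, which carries a usage attribute):

def _record_model_response(span, request_messages, response):
    # type: (sentry_sdk.tracing.Span, Any, Any) -> None
    # Prompt and response content is gated internally by _should_send_prompts();
    # token counts are recorded unconditionally when present.
    _set_input_messages(span, request_messages)
    _set_output_data(span, response)
    _set_usage_data(span, getattr(response, "usage", None))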

sentry_sdk/integrations/pydantic_ai/spans/invoke_agent.py

Lines changed: 14 additions & 11 deletions

@@ -4,7 +4,11 @@
 from sentry_sdk.utils import safe_serialize
 
 from ..consts import SPAN_ORIGIN
-from ..utils import _set_agent_data, _set_model_data, _should_send_prompts
+from ..utils import (
+    _set_agent_data,
+    _set_model_data,
+    _should_send_prompts,
+)
 
 from typing import TYPE_CHECKING
 
@@ -46,11 +50,11 @@ def invoke_agent_span(user_prompt, agent, model, model_settings):
         # Add description from function_schema if available
         if hasattr(tool, "function_schema"):
             schema = tool.function_schema
-            if hasattr(schema, "description") and schema.description:
+            if getattr(schema, "description", None):
                 tool_info["description"] = schema.description
 
             # Add parameters from json_schema
-            if hasattr(schema, "json_schema") and schema.json_schema:
+            if getattr(schema, "json_schema", None):
                 tool_info["parameters"] = schema.json_schema
 
         tools.append(tool_info)
@@ -72,14 +76,14 @@ def invoke_agent_span(user_prompt, agent, model, model_settings):
 
     if agent:
         # Check for system_prompt
-        if hasattr(agent, "_system_prompts") and agent._system_prompts:
-            for prompt in agent._system_prompts:
-                if isinstance(prompt, str):
-                    system_texts.append(prompt)
+        system_prompts = getattr(agent, "_system_prompts", None) or []
+        for prompt in system_prompts:
+            if isinstance(prompt, str):
+                system_texts.append(prompt)
 
         # Check for instructions (stored in _instructions)
-        if hasattr(agent, "_instructions") and agent._instructions:
-            instructions = agent._instructions
+        instructions = getattr(agent, "_instructions", None)
+        if instructions:
             if isinstance(instructions, str):
                 system_texts.append(instructions)
             elif isinstance(instructions, (list, tuple)):
@@ -134,9 +138,8 @@ def update_invoke_agent_span(span, output):
     # type: (sentry_sdk.tracing.Span, Any) -> None
     """Update and close the invoke agent span."""
     if span and _should_send_prompts() and output:
-        output_text = str(output) if not isinstance(output, str) else output
         set_data_normalized(
-            span, SPANDATA.GEN_AI_RESPONSE_TEXT, output_text, unpack=False
+            span, SPANDATA.GEN_AI_RESPONSE_TEXT, str(output), unpack=False
        )
 
     if span:
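
The last hunk can drop the isinstance check because str() returns string inputs unchanged, so str(output) covers both cases; a one-line illustration:

# str() is effectively a no-op for strings, so the removed conditional was redundant.
assert str("final answer") == "final answer"
assert str({"answer": 42}) == "{'answer': 42}"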
