Merged
30 changes: 14 additions & 16 deletions sentry_sdk/integrations/pydantic_ai/patches/agent_run.py
@@ -3,7 +3,7 @@
import sentry_sdk

from ..spans import invoke_agent_span, update_invoke_agent_span
from ..utils import _capture_exception
from ..utils import _capture_exception, pop_agent

from typing import TYPE_CHECKING
from pydantic_ai.agent import Agent # type: ignore
@@ -41,14 +41,13 @@ async def __aenter__(self):
self._isolation_scope = sentry_sdk.isolation_scope()
self._isolation_scope.__enter__()

# Store agent reference and streaming flag
sentry_sdk.get_current_scope().set_context(
"pydantic_ai_agent", {"_agent": self.agent, "_streaming": self.is_streaming}
)

# Create invoke_agent span (will be closed in __aexit__)
self._span = invoke_agent_span(
self.user_prompt, self.agent, self.model, self.model_settings
self.user_prompt,
self.agent,
self.model,
self.model_settings,
self.is_streaming,
)
self._span.__enter__()

@@ -71,7 +70,9 @@ async def __aexit__(self, exc_type, exc_val, exc_tb):
if self._span is not None:
update_invoke_agent_span(self._span, output)
finally:
sentry_sdk.get_current_scope().remove_context("pydantic_ai_agent")
# Pop agent from contextvar stack
pop_agent()

# Clean up invoke span
if self._span:
self._span.__exit__(exc_type, exc_val, exc_tb)
@@ -97,19 +98,15 @@ async def wrapper(self, *args, **kwargs):
# Isolate each workflow so that when agents are run in asyncio tasks they
# don't touch each other's scopes
with sentry_sdk.isolation_scope():
# Store agent reference and streaming flag in Sentry scope for access in nested spans
# We store the full agent to allow access to tools and system prompts
sentry_sdk.get_current_scope().set_context(
"pydantic_ai_agent", {"_agent": self, "_streaming": is_streaming}
)

# Extract parameters for the span
user_prompt = kwargs.get("user_prompt") or (args[0] if args else None)
model = kwargs.get("model")
model_settings = kwargs.get("model_settings")

# Create invoke_agent span
with invoke_agent_span(user_prompt, self, model, model_settings) as span:
with invoke_agent_span(
user_prompt, self, model, model_settings, is_streaming
) as span:
try:
result = await original_func(self, *args, **kwargs)

@@ -122,7 +119,8 @@ async def wrapper(self, *args, **kwargs):
_capture_exception(exc)
raise exc from None
finally:
sentry_sdk.get_current_scope().remove_context("pydantic_ai_agent")
# Pop agent from contextvar stack
pop_agent()

return wrapper

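Both code paths above — the streaming context manager and the run wrapper — now pair the push performed inside invoke_agent_span() (see spans/invoke_agent.py below) with a pop_agent() in their finally blocks. The isolation_scope() plus contextvar combination matters mainly when several agents run concurrently in asyncio tasks; a small usage illustration of that scenario, with hypothetical agents and prompts:

# Illustrative only: two agents running concurrently in asyncio tasks.
# Each task runs in its own copy of the context, so the contextvar stack and the
# isolation scope of one run are invisible to the other, and get_current_agent()
# inside each run resolves to that task's agent.
import asyncio

async def main():
    await asyncio.gather(
        weather_agent.run("What is the weather in Paris?"),    # hypothetical agent
        summary_agent.run("Summarize the attached report"),    # hypothetical agent
    )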
43 changes: 25 additions & 18 deletions sentry_sdk/integrations/pydantic_ai/patches/tools.py
@@ -5,7 +5,10 @@
import sentry_sdk

from ..spans import execute_tool_span, update_execute_tool_span
from ..utils import _capture_exception
from ..utils import (
_capture_exception,
get_current_agent,
)

from typing import TYPE_CHECKING

@@ -49,29 +52,33 @@ async def wrapped_call_tool(self, call, allow_partial, wrap_validation_errors):
if tool and HAS_MCP and isinstance(tool.toolset, MCPServer):
tool_type = "mcp"

# Get agent from Sentry scope
current_span = sentry_sdk.get_current_span()
if current_span and tool:
agent_data = (
sentry_sdk.get_current_scope()._contexts.get("pydantic_ai_agent") or {}
)
agent = agent_data.get("_agent")
# Get agent from contextvar
agent = get_current_agent()

if agent and tool:
try:
args_dict = call.args_as_dict()
except Exception:
args_dict = call.args if isinstance(call.args, dict) else {}

with execute_tool_span(name, args_dict, agent, tool_type=tool_type) as span:
try:
result = await original_call_tool(
self, call, allow_partial, wrap_validation_errors
)
update_execute_tool_span(span, result)
return result
except Exception as exc:
_capture_exception(exc)
raise exc from None
# Create execute_tool span
# Nesting is handled by isolation_scope() to ensure proper parent-child relationships
with sentry_sdk.isolation_scope():
with execute_tool_span(
name,
args_dict,
agent,
tool_type=tool_type,
) as span:
try:
result = await original_call_tool(
self, call, allow_partial, wrap_validation_errors
)
update_execute_tool_span(span, result)
return result
except Exception as exc:
_capture_exception(exc)
raise exc from None

# No span context - just call original
return await original_call_tool(
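Condensed, the traced tool-call path above looks roughly like the sketch below. The function name and its parameter list are illustrative only: in the real wrapper, name, tool_type and call are resolved earlier in the (collapsed) part of the function, and error handling is trimmed here.

# Condensed, illustrative sketch of the traced tool-call path above.
async def traced_tool_call(self, call, name, tool_type, original_call_tool,
                           allow_partial, wrap_validation_errors):
    agent = get_current_agent()      # pushed by the enclosing invoke_agent_span()
    try:
        args_dict = call.args_as_dict()
    except Exception:
        args_dict = call.args if isinstance(call.args, dict) else {}
    # A fresh isolation scope keeps concurrent tool calls from touching each
    # other's scopes, so each execute_tool span is parented correctly.
    with sentry_sdk.isolation_scope():
        with execute_tool_span(name, args_dict, agent, tool_type=tool_type) as span:
            result = await original_call_tool(
                self, call, allow_partial, wrap_validation_errors
            )
            update_execute_tool_span(span, result)
            return result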
17 changes: 5 additions & 12 deletions sentry_sdk/integrations/pydantic_ai/spans/ai_client.py
@@ -10,6 +10,8 @@
_set_model_data,
_should_send_prompts,
_get_model_name,
get_current_agent,
get_is_streaming,
)

from typing import TYPE_CHECKING
@@ -216,20 +218,11 @@ def ai_client_span(messages, agent, model, model_settings):
_set_agent_data(span, agent)
_set_model_data(span, model, model_settings)

# Set streaming flag
agent_data = sentry_sdk.get_current_scope()._contexts.get("pydantic_ai_agent") or {}
is_streaming = agent_data.get("_streaming", False)
span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, is_streaming)
# Set streaming flag from contextvar
span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, get_is_streaming())

# Add available tools if agent is available
agent_obj = agent
if not agent_obj:
# Try to get from Sentry scope
agent_data = (
sentry_sdk.get_current_scope()._contexts.get("pydantic_ai_agent") or {}
)
agent_obj = agent_data.get("_agent")

agent_obj = agent or get_current_agent()
_set_available_tools(span, agent_obj)

# Set input messages (full conversation history)
@@ -8,7 +8,7 @@
from typing import TYPE_CHECKING

if TYPE_CHECKING:
from typing import Any
from typing import Any, Optional


def execute_tool_span(tool_name, tool_args, agent, tool_type="function"):
9 changes: 7 additions & 2 deletions sentry_sdk/integrations/pydantic_ai/spans/invoke_agent.py
@@ -8,6 +8,7 @@
_set_available_tools,
_set_model_data,
_should_send_prompts,
push_agent,
)

from typing import TYPE_CHECKING
@@ -16,8 +17,8 @@
from typing import Any


def invoke_agent_span(user_prompt, agent, model, model_settings):
# type: (Any, Any, Any, Any) -> sentry_sdk.tracing.Span
def invoke_agent_span(user_prompt, agent, model, model_settings, is_streaming=False):
# type: (Any, Any, Any, Any, bool) -> sentry_sdk.tracing.Span
"""Create a span for invoking the agent."""
# Determine agent name for span
name = "agent"
@@ -32,6 +33,10 @@ def invoke_agent_span(user_prompt, agent, model, model_settings):

span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")

# Push agent to contextvar stack immediately after span creation
# This ensures the agent is available in get_current_agent() before _set_model_data is called
push_agent(agent, is_streaming)

_set_agent_data(span, agent)
_set_model_data(span, model, model_settings)
_set_available_tools(span, agent)
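The ordering comment above is the point of this hunk: _set_model_data() (utils.py below) reads get_current_agent() so it can fall back to the agent's model when no model is passed explicitly, and _set_agent_data() has the same fallback, so the push has to happen before those helpers run. Roughly (span, agent, model and model_settings as in the function above):

# Illustrative only: why push_agent() must precede the _set_*_data calls.
get_current_agent()                 # -> None: nothing pushed yet, the fallbacks find no agent

push_agent(agent, is_streaming)     # agent is now visible via get_current_agent()
_set_agent_data(span, agent)
_set_model_data(span, model, model_settings)   # can resolve the agent even when model is None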
61 changes: 49 additions & 12 deletions sentry_sdk/integrations/pydantic_ai/utils.py
@@ -1,4 +1,5 @@
import sentry_sdk
from contextvars import ContextVar
from sentry_sdk.consts import SPANDATA
from sentry_sdk.scope import should_send_default_pii
from sentry_sdk.tracing_utils import set_span_errored
@@ -7,7 +8,47 @@
from typing import TYPE_CHECKING

if TYPE_CHECKING:
from typing import Any
from typing import Any, Optional


# Store the current agent context in a contextvar for re-entrant safety
# Using a list as a stack to support nested agent calls
_agent_context_stack = ContextVar("pydantic_ai_agent_context_stack", default=[]) # type: ContextVar[list[dict[str, Any]]]


def push_agent(agent, is_streaming=False):
# type: (Any, bool) -> None
"""Push an agent context onto the stack along with its streaming flag."""
stack = _agent_context_stack.get().copy()
stack.append({"agent": agent, "is_streaming": is_streaming})
_agent_context_stack.set(stack)


def pop_agent():
# type: () -> None
"""Pop an agent context from the stack."""
stack = _agent_context_stack.get().copy()
if stack:
stack.pop()
_agent_context_stack.set(stack)


def get_current_agent():
# type: () -> Any
"""Get the current agent from the contextvar stack."""
stack = _agent_context_stack.get()
if stack:
return stack[-1]["agent"]
return None


def get_is_streaming():
# type: () -> bool
"""Get the streaming flag from the contextvar stack."""
stack = _agent_context_stack.get()
if stack:
return stack[-1].get("is_streaming", False)
return False


def _should_send_prompts():
@@ -37,23 +78,20 @@ def _set_agent_data(span, agent):

Args:
span: The span to set data on
agent: Agent object (can be None, will try to get from Sentry scope if not provided)
agent: Agent object (can be None, will try to get from contextvar if not provided)
"""
# Extract agent name from agent object or Sentry scope
# Extract agent name from agent object or contextvar
agent_obj = agent
if not agent_obj:
# Try to get from Sentry scope
agent_data = (
sentry_sdk.get_current_scope()._contexts.get("pydantic_ai_agent") or {}
)
agent_obj = agent_data.get("_agent")
# Try to get from contextvar
agent_obj = get_current_agent()

if agent_obj and hasattr(agent_obj, "name") and agent_obj.name:
span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_obj.name)


def _get_model_name(model_obj):
# type: (Any) -> str | None
# type: (Any) -> Optional[str]
"""Extract model name from a model object.

Args:
Expand Down Expand Up @@ -87,9 +125,8 @@ def _set_model_data(span, model, model_settings):
model: Model object (can be None, will try to get from agent if not provided)
model_settings: Model settings (can be None, will try to get from agent if not provided)
"""
# Try to get agent from Sentry scope if we need it
agent_data = sentry_sdk.get_current_scope()._contexts.get("pydantic_ai_agent") or {}
agent_obj = agent_data.get("_agent")
# Try to get agent from contextvar if we need it
agent_obj = get_current_agent()

# Extract model information
model_obj = model
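A quick illustration of the stack semantics these helpers provide; outer_agent and inner_agent are hypothetical, and the calls are shown outside the SDK plumbing for clarity:

# Hypothetical usage showing the re-entrant (nested) behaviour of the stack.
push_agent(outer_agent, is_streaming=False)    # e.g. the top-level invoke_agent span
push_agent(inner_agent, is_streaming=True)     # e.g. an agent invoked from a tool call

get_current_agent()    # -> inner_agent (top of the stack)
get_is_streaming()     # -> True

pop_agent()            # inner agent finished
get_current_agent()    # -> outer_agent again
get_is_streaming()     # -> False

pop_agent()            # stack is empty again
get_current_agent()    # -> None
get_is_streaming()     # -> False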