Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 18 additions & 0 deletions sentry_sdk/integrations/openai_agents/_context_vars.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
"""
Context variables for passing data between nested calls in the OpenAI Agents integration.
"""

from contextvars import ContextVar

from typing import TYPE_CHECKING

if TYPE_CHECKING:
pass

# Context variable to pass response model between nested calls (for gen_ai.chat spans)
_response_model_context = ContextVar("openai_agents_response_model", default=None) # type: ContextVar[str | None]

# Context variable to store the last response model for invoke_agent spans
_invoke_agent_response_model_context = ContextVar(
"openai_agents_invoke_agent_response_model", default=None
) # type: ContextVar[str | None]
28 changes: 28 additions & 0 deletions sentry_sdk/integrations/openai_agents/patches/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,10 @@

from sentry_sdk.integrations import DidNotEnable

from .._context_vars import (
_invoke_agent_response_model_context,
_response_model_context,
)
from ..spans import ai_client_span, update_ai_client_span

from typing import TYPE_CHECKING
Expand Down Expand Up @@ -33,12 +37,36 @@ def wrapped_get_model(cls, agent, run_config):
model = original_get_model(agent, run_config)
original_get_response = model.get_response

# Wrap _fetch_response if it exists (for OpenAI models) to capture raw response model
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

out of curiosity, why do we need to capture the raw response model?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is the only place I found where the response model can be read. The OpenAI Agents SDK does not expose it: we only get the request model, not the response model.

# Wrap _fetch_response if it exists (OpenAI models only) to capture the model
# name from the raw provider response — the Agents SDK's get_response result
# does not expose it, so this is the only place it can be read.
if hasattr(model, "_fetch_response"):
    original_fetch_response = model._fetch_response

    @wraps(original_fetch_response)
    async def wrapped_fetch_response(*args, **kwargs):
        # type: (*Any, **Any) -> Any
        response = await original_fetch_response(*args, **kwargs)
        # Store the model from the raw response in a context variable so the
        # enclosing wrapped_get_response can pick it up after the call returns.
        if hasattr(response, "model"):
            _response_model_context.set(str(response.model))
        return response

    model._fetch_response = wrapped_fetch_response

@wraps(original_get_response)
async def wrapped_get_response(*args, **kwargs):
    # type: (*Any, **Any) -> Any
    # Run the model call inside a gen_ai.chat span and enrich the span with
    # data from the result before the span is closed.
    with ai_client_span(agent, kwargs) as span:
        result = await original_get_response(*args, **kwargs)

        # Retrieve the response model captured by wrapped_fetch_response (via
        # context variable) and attach it to the ModelResponse for reporting.
        response_model = _response_model_context.get(None)
        if response_model:
            result._sentry_response_model = response_model
            _response_model_context.set(None)  # Clear context

            # Also store for the invoke_agent span (the last model used in
            # the run wins, since each call overwrites the previous value).
            _invoke_agent_response_model_context.set(response_model)

        update_ai_client_span(span, agent, kwargs, result)

    return result
Expand Down
4 changes: 4 additions & 0 deletions sentry_sdk/integrations/openai_agents/spans/ai_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,3 +40,7 @@ def update_ai_client_span(span, agent, get_response_kwargs, result):
    _set_usage_data(span, result.usage)
    _set_output_data(span, result)
    _create_mcp_execute_tool_spans(span, result)

    # Set the response model if it was captured from the raw provider
    # response (attached to the result by the models patch, since the Agents
    # SDK does not expose the response model directly).
    if hasattr(result, "_sentry_response_model") and result._sentry_response_model:
        span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, result._sentry_response_model)
13 changes: 12 additions & 1 deletion sentry_sdk/integrations/openai_agents/spans/invoke_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,9 @@
from sentry_sdk.scope import should_send_default_pii
from sentry_sdk.utils import safe_serialize

from .._context_vars import _invoke_agent_response_model_context
from ..consts import SPAN_ORIGIN
from ..utils import _set_agent_data
from ..utils import _set_agent_data, _set_usage_data

from typing import TYPE_CHECKING

Expand Down Expand Up @@ -78,6 +79,16 @@ def update_invoke_agent_span(context, agent, output):
span = sentry_sdk.get_current_span()

if span:
        # Add aggregated usage data from context_wrapper, which accumulates
        # token counts across all model calls made during the run.
        if hasattr(context, "usage"):
            _set_usage_data(span, context.usage)

        # Add the response model if available — the last model used during
        # the run, handed over from the models patch via a context variable.
        response_model = _invoke_agent_response_model_context.get(None)
        if response_model:
            span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, response_model)
            _invoke_agent_response_model_context.set(None)  # Clear after use

if should_send_default_pii():
set_data_normalized(
span, SPANDATA.GEN_AI_RESPONSE_TEXT, output, unpack=False
Expand Down
Loading