Skip to content

Commit 6a1d6b3

Browse files
committed
feat(integrations): openai-agents: add usage and response model reporting for chat and invoke_agent spans
1 parent 7b7ea33 commit 6a1d6b3

File tree

5 files changed

+627
-1
lines changed

5 files changed

+627
-1
lines changed
sentry_sdk/integrations/openai_agents/_context_vars.py

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,18 @@
1+
"""
2+
Context variables for passing data between nested calls in the OpenAI Agents integration.
3+
"""
4+
5+
from contextvars import ContextVar
6+
7+
from typing import TYPE_CHECKING
8+
9+
if TYPE_CHECKING:
10+
pass
11+
12+
# Context variable to pass response model between nested calls (for gen_ai.chat spans)
13+
_response_model_context = ContextVar("openai_agents_response_model", default=None) # type: ContextVar[str | None]
14+
15+
# Context variable to store the last response model for invoke_agent spans
16+
_invoke_agent_response_model_context = ContextVar(
17+
"openai_agents_invoke_agent_response_model", default=None
18+
) # type: ContextVar[str | None]

sentry_sdk/integrations/openai_agents/patches/models.py

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,10 @@
22

33
from sentry_sdk.integrations import DidNotEnable
44

5+
from .._context_vars import (
6+
_invoke_agent_response_model_context,
7+
_response_model_context,
8+
)
59
from ..spans import ai_client_span, update_ai_client_span
610

711
from typing import TYPE_CHECKING
@@ -33,12 +37,36 @@ def wrapped_get_model(cls, agent, run_config):
3337
model = original_get_model(agent, run_config)
3438
original_get_response = model.get_response
3539

40+
# Wrap _fetch_response if it exists (for OpenAI models) to capture raw response model
41+
if hasattr(model, "_fetch_response"):
42+
original_fetch_response = model._fetch_response
43+
44+
@wraps(original_fetch_response)
45+
async def wrapped_fetch_response(*args, **kwargs):
46+
# type: (*Any, **Any) -> Any
47+
response = await original_fetch_response(*args, **kwargs)
48+
# Store model from raw response in context variable
49+
if hasattr(response, "model"):
50+
_response_model_context.set(str(response.model))
51+
return response
52+
53+
model._fetch_response = wrapped_fetch_response
54+
3655
@wraps(original_get_response)
3756
async def wrapped_get_response(*args, **kwargs):
3857
# type: (*Any, **Any) -> Any
3958
with ai_client_span(agent, kwargs) as span:
4059
result = await original_get_response(*args, **kwargs)
4160

61+
# Retrieve response model from context and attach to ModelResponse
62+
response_model = _response_model_context.get(None)
63+
if response_model:
64+
result._sentry_response_model = response_model
65+
_response_model_context.set(None) # Clear context
66+
67+
# Also store for invoke_agent span (will be the last one used)
68+
_invoke_agent_response_model_context.set(response_model)
69+
4270
update_ai_client_span(span, agent, kwargs, result)
4371

4472
return result

sentry_sdk/integrations/openai_agents/spans/ai_client.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -40,3 +40,7 @@ def update_ai_client_span(span, agent, get_response_kwargs, result):
4040
_set_usage_data(span, result.usage)
4141
_set_output_data(span, result)
4242
_create_mcp_execute_tool_spans(span, result)
43+
44+
# Set response model if captured from raw response
45+
if hasattr(result, "_sentry_response_model") and result._sentry_response_model:
46+
span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, result._sentry_response_model)

sentry_sdk/integrations/openai_agents/spans/invoke_agent.py

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,8 +8,9 @@
88
from sentry_sdk.scope import should_send_default_pii
99
from sentry_sdk.utils import safe_serialize
1010

11+
from .._context_vars import _invoke_agent_response_model_context
1112
from ..consts import SPAN_ORIGIN
12-
from ..utils import _set_agent_data
13+
from ..utils import _set_agent_data, _set_usage_data
1314

1415
from typing import TYPE_CHECKING
1516

@@ -78,6 +79,16 @@ def update_invoke_agent_span(context, agent, output):
7879
span = sentry_sdk.get_current_span()
7980

8081
if span:
82+
# Add aggregated usage data from context_wrapper
83+
if hasattr(context, "usage"):
84+
_set_usage_data(span, context.usage)
85+
86+
# Add response model if available (will be the last model used)
87+
response_model = _invoke_agent_response_model_context.get(None)
88+
if response_model:
89+
span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, response_model)
90+
_invoke_agent_response_model_context.set(None) # Clear after use
91+
8192
if should_send_default_pii():
8293
set_data_normalized(
8394
span, SPANDATA.GEN_AI_RESPONSE_TEXT, output, unpack=False

0 commit comments

Comments (0)