Skip to content

Commit b0264f7

Browse files
committed
Merge branch 'potel-base' into szokeasaurusrex/agents-potel
2 parents d8118e5 + bca9881 commit b0264f7

File tree

5 files changed

+68
-12
lines changed

5 files changed

+68
-12
lines changed

requirements-aws-lambda-layer.txt

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,5 +5,6 @@ urllib3
55
# https://github.com/boto/botocore/blob/develop/setup.cfg
66
# So we pin this here to make our Lambda layer work with
77
# Lambda Function using Python 3.7+
8-
urllib3<1.27; python_version < "3.10"
8+
urllib3<1.27
9+
910
opentelemetry-distro>=0.35b0

sentry_sdk/integrations/openai_agents/spans/ai_client.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,10 +21,11 @@
2121
def ai_client_span(
2222
agent: Agent, get_response_kwargs: dict[str, Any]
2323
) -> sentry_sdk.tracing.Span:
24+
model_name = agent.model.model if hasattr(agent.model, "model") else agent.model
2425
# TODO-anton: implement other types of operations. Now "chat" is hardcoded.
2526
span = sentry_sdk.start_span(
2627
op=OP.GEN_AI_CHAT,
27-
description=f"chat {agent.model}",
28+
description=f"chat {model_name}",
2829
origin=SPAN_ORIGIN,
2930
)
3031
# TODO-anton: remove hardcoded stuff and replace something that also works for embedding and so on

sentry_sdk/integrations/openai_agents/utils.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -43,7 +43,8 @@ def _set_agent_data(span: sentry_sdk.tracing.Span, agent: agents.Agent) -> None:
4343
)
4444

4545
if agent.model:
46-
span.set_attribute(SPANDATA.GEN_AI_REQUEST_MODEL, agent.model)
46+
model_name = agent.model.model if hasattr(agent.model, "model") else agent.model
47+
span.set_attribute(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
4748

4849
if agent.model_settings.presence_penalty:
4950
span.set_attribute(

sentry_sdk/sessions.py

Lines changed: 8 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,6 @@
11
from __future__ import annotations
22
import os
3-
import time
4-
from threading import Thread, Lock
3+
from threading import Thread, Lock, Event
54
from contextlib import contextmanager
65

76
import sentry_sdk
@@ -76,7 +75,7 @@ def __init__(
7675
self._thread_lock = Lock()
7776
self._aggregate_lock = Lock()
7877
self._thread_for_pid: Optional[int] = None
79-
self._running = True
78+
self.__shutdown_requested: Event = Event()
8079

8180
def flush(self) -> None:
8281
pending_sessions = self.pending_sessions
@@ -119,10 +118,10 @@ def _ensure_running(self) -> None:
119118
return None
120119

121120
def _thread() -> None:
122-
while self._running:
123-
time.sleep(self.flush_interval)
124-
if self._running:
125-
self.flush()
121+
running = True
122+
while running:
123+
running = not self.__shutdown_requested.wait(self.flush_interval)
124+
self.flush()
126125

127126
thread = Thread(target=_thread)
128127
thread.daemon = True
@@ -131,7 +130,7 @@ def _thread() -> None:
131130
except RuntimeError:
132131
# Unfortunately at this point the interpreter is in a state that no
133132
# longer allows us to spawn a thread and we have to bail.
134-
self._running = False
133+
self.__shutdown_requested.set()
135134
return None
136135

137136
self._thread = thread
@@ -175,7 +174,7 @@ def add_session(self, session: Session) -> None:
175174
self._ensure_running()
176175

177176
def kill(self) -> None:
178-
self._running = False
177+
self.__shutdown_requested.set()
179178

180179
def __del__(self) -> None:
181180
self.kill()

tests/integrations/openai_agents/test_openai_agents.py

Lines changed: 54 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -74,6 +74,24 @@ def test_agent():
7474
)
7575

7676

77+
@pytest.fixture
78+
def test_agent_custom_model():
79+
"""Create a real Agent instance for testing."""
80+
return Agent(
81+
name="test_agent_custom_model",
82+
instructions="You are a helpful test assistant.",
83+
# the model could be agents.OpenAIChatCompletionsModel()
84+
model=MagicMock(model="my-custom-model"),
85+
model_settings=ModelSettings(
86+
max_tokens=100,
87+
temperature=0.7,
88+
top_p=1.0,
89+
presence_penalty=0.0,
90+
frequency_penalty=0.0,
91+
),
92+
)
93+
94+
7795
@pytest.mark.asyncio
7896
async def test_agent_invocation_span(
7997
sentry_init, capture_events, test_agent, mock_model_response
@@ -128,6 +146,42 @@ async def test_agent_invocation_span(
128146
assert ai_client_span["data"]["gen_ai.request.top_p"] == 1.0
129147

130148

149+
@pytest.mark.asyncio
150+
async def test_client_span_custom_model(
151+
sentry_init, capture_events, test_agent_custom_model, mock_model_response
152+
):
153+
"""
154+
Test that the integration uses the correct model name if a custom model is used.
155+
"""
156+
157+
with patch.dict(os.environ, {"OPENAI_API_KEY": "test-key"}):
158+
with patch(
159+
"agents.models.openai_responses.OpenAIResponsesModel.get_response"
160+
) as mock_get_response:
161+
mock_get_response.return_value = mock_model_response
162+
163+
sentry_init(
164+
integrations=[OpenAIAgentsIntegration()],
165+
traces_sample_rate=1.0,
166+
)
167+
168+
events = capture_events()
169+
170+
result = await agents.Runner.run(
171+
test_agent_custom_model, "Test input", run_config=test_run_config
172+
)
173+
174+
assert result is not None
175+
assert result.final_output == "Hello, how can I help you?"
176+
177+
(transaction,) = events
178+
spans = transaction["spans"]
179+
_, ai_client_span = spans
180+
181+
assert ai_client_span["description"] == "chat my-custom-model"
182+
assert ai_client_span["data"]["gen_ai.request.model"] == "my-custom-model"
183+
184+
131185
def test_agent_invocation_span_sync(
132186
sentry_init, capture_events, test_agent, mock_model_response
133187
):

0 commit comments

Comments (0)