Skip to content

Commit bca9881

Browse files
committed
Merge branch 'master' into potel-base
2 parents cd422af + 1df6c9a commit bca9881

File tree

9 files changed

+942
-47
lines changed

9 files changed

+942
-47
lines changed

requirements-aws-lambda-layer.txt

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,8 @@
11
certifi
2-
3-
# In Lambda functions botocore is used, and botocore is not
4-
# yet supporting urllib3 1.27.0 never mind 2+.
2+
urllib3
3+
# In Lambda functions botocore is used, and botocore has
4+
# restrictions on urllib3
5+
# https://github.com/boto/botocore/blob/develop/setup.cfg
56
# So we pin this here to make our Lambda layer work with
67
# Lambda Function using Python 3.7+
78
urllib3<1.27

scripts/generate-test-files.sh

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -6,12 +6,13 @@ set -xe
66

77
cd "$(dirname "$0")"
88

9+
rm -rf toxgen.venv
910
python -m venv toxgen.venv
1011
. toxgen.venv/bin/activate
1112

12-
pip install -e ..
13-
pip install -r populate_tox/requirements.txt
14-
pip install -r split_tox_gh_actions/requirements.txt
13+
toxgen.venv/bin/pip install -e ..
14+
toxgen.venv/bin/pip install -r populate_tox/requirements.txt
15+
toxgen.venv/bin/pip install -r split_tox_gh_actions/requirements.txt
1516

16-
python populate_tox/populate_tox.py
17-
python split_tox_gh_actions/split_tox_gh_actions.py
17+
toxgen.venv/bin/python populate_tox/populate_tox.py
18+
toxgen.venv/bin/python split_tox_gh_actions/split_tox_gh_actions.py

scripts/populate_tox/populate_tox.py

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@
1010
from bisect import bisect_left
1111
from collections import defaultdict
1212
from datetime import datetime, timedelta, timezone # noqa: F401
13-
from importlib.metadata import metadata
13+
from importlib.metadata import PackageMetadata, distributions
1414
from packaging.specifiers import SpecifierSet
1515
from packaging.version import Version
1616
from pathlib import Path
@@ -87,6 +87,13 @@
8787
}
8888

8989

90+
def _fetch_sdk_metadata() -> PackageMetadata:
    """Return the package metadata of the sentry-sdk checkout.

    Looks up the distribution relative to this script (three directories
    up, i.e. the repository root) rather than whatever happens to be
    installed in the active environment.
    """
    repo_root = Path(__file__).parent.parent.parent
    # Exactly one matching distribution is expected; unpacking enforces that.
    (dist,) = distributions(name="sentry-sdk", path=[repo_root])
    return dist.metadata
95+
96+
9097
def fetch_url(url: str) -> Optional[dict]:
9198
for attempt in range(3):
9299
pypi_data = requests.get(url)
@@ -592,8 +599,9 @@ def main(fail_on_changes: bool = False) -> None:
592599
)
593600

594601
global MIN_PYTHON_VERSION, MAX_PYTHON_VERSION
602+
meta = _fetch_sdk_metadata()
595603
sdk_python_versions = _parse_python_versions_from_classifiers(
596-
metadata("sentry-sdk").get_all("Classifier")
604+
meta.get_all("Classifier")
597605
)
598606
MIN_PYTHON_VERSION = sdk_python_versions[0]
599607
MAX_PYTHON_VERSION = sdk_python_versions[-1]
Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,39 @@
1+
import sentry_sdk
2+
from sentry_sdk.consts import OP, SPANDATA
3+
4+
from ..consts import SPAN_ORIGIN
5+
from ..utils import (
6+
_set_agent_data,
7+
_set_input_data,
8+
_set_output_data,
9+
_set_usage_data,
10+
)
11+
12+
from typing import TYPE_CHECKING
13+
14+
if TYPE_CHECKING:
15+
from agents import Agent
16+
from typing import Any
17+
18+
19+
def ai_client_span(agent, get_response_kwargs):
    # type: (Agent, dict[str, Any]) -> sentry_sdk.tracing.Span
    """Start a gen_ai client span for a model request made by *agent*."""
    # TODO-anton: implement other types of operations. Now "chat" is hardcoded.
    # The model may be a plain name or an object carrying a ``model`` attribute.
    model_name = getattr(agent.model, "model", agent.model)

    span = sentry_sdk.start_span(
        op=OP.GEN_AI_CHAT,
        description=f"chat {model_name}",
        origin=SPAN_ORIGIN,
    )
    # TODO-anton: remove hardcoded stuff and replace something that also works for embedding and so on
    span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")

    return span
32+
33+
34+
def update_ai_client_span(span, agent, get_response_kwargs, result):
    # type: (sentry_sdk.tracing.Span, Agent, dict[str, Any], Any) -> None
    """Enrich a gen_ai chat span with agent, usage and message data."""
    usage = result.usage
    _set_agent_data(span, agent)
    _set_usage_data(span, usage)
    _set_input_data(span, get_response_kwargs)
    _set_output_data(span, result)
Lines changed: 210 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,210 @@
1+
import json
2+
import sentry_sdk
3+
from sentry_sdk.consts import SPANDATA
4+
from sentry_sdk.integrations import DidNotEnable
5+
from sentry_sdk.scope import should_send_default_pii
6+
from sentry_sdk.utils import event_from_exception
7+
8+
from typing import TYPE_CHECKING
9+
10+
if TYPE_CHECKING:
11+
from typing import Any
12+
from typing import Callable
13+
from typing import Union
14+
from agents import Usage
15+
16+
try:
17+
import agents
18+
19+
except ImportError:
20+
raise DidNotEnable("OpenAI Agents not installed")
21+
22+
23+
def _capture_exception(exc):
    # type: (Any) -> None
    """Report *exc* to Sentry as an unhandled openai_agents error."""
    client = sentry_sdk.get_client()
    event, hint = event_from_exception(
        exc,
        client_options=client.options,
        mechanism={"type": "openai_agents", "handled": False},
    )
    sentry_sdk.capture_event(event, hint=hint)
31+
32+
33+
def _get_start_span_function():
    # type: () -> Callable[..., Any]
    """Pick the span factory to use.

    When the current span is its own containing transaction (i.e. a
    transaction is already active), new work becomes a child span;
    otherwise a fresh transaction is started.
    """
    span = sentry_sdk.get_current_span()
    if span is not None and span.containing_transaction == span:
        return sentry_sdk.start_span
    return sentry_sdk.start_transaction
40+
41+
42+
def _set_agent_data(span, agent):
    # type: (sentry_sdk.tracing.Span, agents.Agent) -> None
    """Attach agent name, model and model settings to *span*."""
    # "openai" is reported for every agent; see footnote for
    # https://opentelemetry.io/docs/specs/semconv/registry/attributes/gen-ai/#gen-ai-system
    # for explanation why.
    span.set_data(SPANDATA.GEN_AI_SYSTEM, "openai")
    span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent.name)

    settings = agent.model_settings
    if settings.max_tokens:
        span.set_data(SPANDATA.GEN_AI_REQUEST_MAX_TOKENS, settings.max_tokens)

    if agent.model:
        # The model may be a plain name or an object carrying the name.
        model_name = getattr(agent.model, "model", agent.model)
        span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)

    # Optional numeric settings: only record truthy values (unset/zero
    # values are skipped, as in the original truthiness checks).
    optional_settings = (
        (SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY, settings.presence_penalty),
        (SPANDATA.GEN_AI_REQUEST_TEMPERATURE, settings.temperature),
        (SPANDATA.GEN_AI_REQUEST_TOP_P, settings.top_p),
        (SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY, settings.frequency_penalty),
    )
    for key, value in optional_settings:
        if value:
            span.set_data(key, value)

    if agent.tools:
        span.set_data(
            SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS,
            safe_serialize([vars(tool) for tool in agent.tools]),
        )
84+
85+
86+
def _set_usage_data(span, usage):
    # type: (sentry_sdk.tracing.Span, Usage) -> None
    """Record token usage counters from *usage* on *span*."""
    token_counts = (
        (SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, usage.input_tokens),
        (
            SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED,
            usage.input_tokens_details.cached_tokens,
        ),
        (SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS, usage.output_tokens),
        (
            SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING,
            usage.output_tokens_details.reasoning_tokens,
        ),
        (SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS, usage.total_tokens),
    )
    for key, value in token_counts:
        span.set_data(key, value)
99+
100+
101+
def _set_input_data(span, get_response_kwargs):
    # type: (sentry_sdk.tracing.Span, dict[str, Any]) -> None
    """Record the request messages on *span*, grouped by role.

    Does nothing unless send_default_pii is enabled, since message
    content may contain PII.
    """
    if not should_send_default_pii():
        return

    messages_by_role = {
        "system": [],
        "user": [],
        "assistant": [],
        "tool": [],
    }  # type: dict[str, list[Any]]

    system_instructions = get_response_kwargs.get("system_instructions")
    if system_instructions:
        messages_by_role["system"].append({"type": "text", "text": system_instructions})

    for message in get_response_kwargs.get("input", []):
        if "role" in message:
            # setdefault: roles outside the predefined set (e.g. "developer")
            # get their own bucket instead of raising KeyError.
            messages_by_role.setdefault(message.get("role"), []).append(
                {"type": "text", "text": message.get("content")}
            )
        else:
            if message.get("type") == "function_call":
                messages_by_role["assistant"].append(message)
            elif message.get("type") == "function_call_output":
                messages_by_role["tool"].append(message)

    # Only roles that actually have messages are reported.
    request_messages = [
        {"role": role, "content": messages}
        for role, messages in messages_by_role.items()
        if messages
    ]

    span.set_data(SPANDATA.GEN_AI_REQUEST_MESSAGES, safe_serialize(request_messages))
133+
134+
135+
def _set_output_data(span, result):
    # type: (sentry_sdk.tracing.Span, Any) -> None
    """Record model output (tool calls and response text) on *span*.

    Does nothing unless send_default_pii is enabled.
    """
    if not should_send_default_pii():
        return

    tool_calls = []
    responses = []

    for output in result.output:
        if output.type == "function_call":
            tool_calls.append(output.dict())
        elif output.type == "message":
            for output_message in output.content:
                try:
                    responses.append(output_message.text)
                except AttributeError:
                    # Unknown output message type, just return the json
                    responses.append(output_message.dict())

    if len(tool_calls) > 0:
        span.set_data(SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, safe_serialize(tool_calls))

    if len(responses) > 0:
        span.set_data(SPANDATA.GEN_AI_RESPONSE_TEXT, safe_serialize(responses))
165+
166+
167+
def safe_serialize(data):
    # type: (Any) -> str
    """Safely serialize to a readable string."""

    def _convert(value):
        # type: (Any) -> Union[str, dict[Any, Any], list[Any], tuple[Any, ...]]
        # Order matters: callables first (a callable may also have __dict__),
        # then containers, then generic objects, then JSON-native values.
        if callable(value):
            try:
                module = getattr(value, "__module__", None)
                qualname = getattr(value, "__qualname__", None)
                name = getattr(value, "__name__", "anonymous")

                if module and qualname:
                    full_path = f"{module}.{qualname}"
                elif module and name:
                    full_path = f"{module}.{name}"
                else:
                    full_path = name

                return f"<function {full_path}>"
            except Exception:
                return f"<callable {type(value).__name__}>"
        if isinstance(value, dict):
            return {key: _convert(item) for key, item in value.items()}
        if isinstance(value, (list, tuple)):
            return [_convert(item) for item in value]
        if hasattr(value, "__dict__"):
            try:
                public_attrs = {
                    key: _convert(item)
                    for key, item in vars(value).items()
                    if not key.startswith("_")
                }
                return f"<{type(value).__name__} {public_attrs}>"
            except Exception:
                return repr(value)
        return value

    try:
        # default=str catches anything _convert passed through unchanged
        # that json cannot encode natively.
        return json.dumps(_convert(data), default=str)
    except Exception:
        return str(data)

sentry_sdk/sessions.py

Lines changed: 8 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,6 @@
11
from __future__ import annotations
22
import os
3-
import time
4-
from threading import Thread, Lock
3+
from threading import Thread, Lock, Event
54
from contextlib import contextmanager
65

76
import sentry_sdk
@@ -76,7 +75,7 @@ def __init__(
7675
self._thread_lock = Lock()
7776
self._aggregate_lock = Lock()
7877
self._thread_for_pid: Optional[int] = None
79-
self._running = True
78+
self.__shutdown_requested: Event = Event()
8079

8180
def flush(self) -> None:
8281
pending_sessions = self.pending_sessions
@@ -119,10 +118,10 @@ def _ensure_running(self) -> None:
119118
return None
120119

121120
def _thread() -> None:
122-
while self._running:
123-
time.sleep(self.flush_interval)
124-
if self._running:
125-
self.flush()
121+
running = True
122+
while running:
123+
running = not self.__shutdown_requested.wait(self.flush_interval)
124+
self.flush()
126125

127126
thread = Thread(target=_thread)
128127
thread.daemon = True
@@ -131,7 +130,7 @@ def _thread() -> None:
131130
except RuntimeError:
132131
# Unfortunately at this point the interpreter is in a state that no
133132
# longer allows us to spawn a thread and we have to bail.
134-
self._running = False
133+
self.__shutdown_requested.set()
135134
return None
136135

137136
self._thread = thread
@@ -175,7 +174,7 @@ def add_session(self, session: Session) -> None:
175174
self._ensure_running()
176175

177176
def kill(self) -> None:
178-
self._running = False
177+
self.__shutdown_requested.set()
179178

180179
def __del__(self) -> None:
181180
self.kill()

0 commit comments

Comments
 (0)