Skip to content

Commit 149a7da

Browse files
authored
feat(ai): Add python-genai integration (#4891)
Adds support for `python-genai` integrations. It supports both sync and async clients, and both regular and streaming modes for interacting with models and building agents. Closes [PY-1733: Add agent monitoring support for `google-genai`](https://linear.app/getsentry/issue/PY-1733/add-agent-monitoring-support-for-google-genai)
1 parent b7cda42 commit 149a7da

File tree

14 files changed

+1998
-18
lines changed

14 files changed

+1998
-18
lines changed

.github/workflows/test-integrations-ai.yml

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -82,6 +82,10 @@ jobs:
8282
run: |
8383
set -x # print commands that are executed
8484
./scripts/runtox.sh "py${{ matrix.python-version }}-langgraph"
85+
- name: Test google_genai
86+
run: |
87+
set -x # print commands that are executed
88+
./scripts/runtox.sh "py${{ matrix.python-version }}-google_genai"
8589
- name: Test openai_agents
8690
run: |
8791
set -x # print commands that are executed

pyproject.toml

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -118,6 +118,10 @@ ignore_missing_imports = true
118118
module = "langgraph.*"
119119
ignore_missing_imports = true
120120

121+
[[tool.mypy.overrides]]
122+
module = "google.genai.*"
123+
ignore_missing_imports = true
124+
121125
[[tool.mypy.overrides]]
122126
module = "executing.*"
123127
ignore_missing_imports = true

scripts/populate_tox/config.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -142,6 +142,13 @@
142142
"package": "gql[all]",
143143
"num_versions": 2,
144144
},
145+
"google_genai": {
146+
"package": "google-genai",
147+
"deps": {
148+
"*": ["pytest-asyncio"],
149+
},
150+
"python": ">=3.9",
151+
},
145152
"graphene": {
146153
"package": "graphene",
147154
"deps": {

scripts/populate_tox/releases.jsonl

Lines changed: 10 additions & 6 deletions
Large diffs are not rendered by default.

scripts/split_tox_gh_actions/split_tox_gh_actions.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -78,6 +78,7 @@
7878
"openai-base",
7979
"openai-notiktoken",
8080
"langgraph",
81+
"google_genai",
8182
"openai_agents",
8283
"huggingface_hub",
8384
],

sentry_sdk/integrations/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -140,6 +140,7 @@ def iter_default_integrations(with_auto_enabling_integrations):
140140
"flask": (1, 1, 4),
141141
"gql": (3, 4, 1),
142142
"graphene": (3, 3),
143+
"google_genai": (1, 29, 0), # google-genai
143144
"grpc": (1, 32, 0), # grpcio
144145
"httpx": (0, 16, 0),
145146
"huggingface_hub": (0, 24, 7),
Lines changed: 298 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,298 @@
1+
from functools import wraps
2+
from typing import (
3+
Any,
4+
AsyncIterator,
5+
Callable,
6+
Iterator,
7+
List,
8+
)
9+
10+
import sentry_sdk
11+
from sentry_sdk.ai.utils import get_start_span_function
12+
from sentry_sdk.integrations import DidNotEnable, Integration
13+
from sentry_sdk.consts import OP, SPANDATA
14+
from sentry_sdk.tracing import SPANSTATUS
15+
16+
17+
try:
18+
from google.genai.models import Models, AsyncModels
19+
except ImportError:
20+
raise DidNotEnable("google-genai not installed")
21+
22+
23+
from .consts import IDENTIFIER, ORIGIN, GEN_AI_SYSTEM
24+
from .utils import (
25+
set_span_data_for_request,
26+
set_span_data_for_response,
27+
_capture_exception,
28+
prepare_generate_content_args,
29+
)
30+
from .streaming import (
31+
set_span_data_for_streaming_response,
32+
accumulate_streaming_response,
33+
)
34+
35+
36+
class GoogleGenAIIntegration(Integration):
    identifier = IDENTIFIER
    origin = ORIGIN

    def __init__(self, include_prompts=True):
        # type: (GoogleGenAIIntegration, bool) -> None
        # Controls whether prompt/response contents (potential PII) may be
        # attached to spans.
        self.include_prompts = include_prompts

    @staticmethod
    def setup_once():
        # type: () -> None
        """Monkeypatch the sync and async google-genai model entry points."""
        to_patch = (
            (Models, "generate_content", _wrap_generate_content),
            (Models, "generate_content_stream", _wrap_generate_content_stream),
            (AsyncModels, "generate_content", _wrap_async_generate_content),
            (
                AsyncModels,
                "generate_content_stream",
                _wrap_async_generate_content_stream,
            ),
        )
        for owner, method_name, make_wrapper in to_patch:
            original = getattr(owner, method_name)
            setattr(owner, method_name, make_wrapper(original))
60+
61+
62+
def _wrap_generate_content_stream(f):
    # type: (Callable[..., Any]) -> Callable[..., Any]
    """Wrap the synchronous ``Models.generate_content_stream`` with Sentry spans.

    Creates an outer "invoke_agent" span and a nested "chat" span. The spans
    are entered manually (not via ``with``) because they must remain open
    until the caller finishes consuming the returned iterator; they are
    closed in the iterator's ``finally`` block, or in the outer ``except``
    if the initial call itself fails.
    """

    @wraps(f)
    def new_generate_content_stream(self, *args, **kwargs):
        # type: (Any, Any, Any) -> Any
        integration = sentry_sdk.get_client().get_integration(GoogleGenAIIntegration)
        if integration is None:
            # Integration disabled: call through untouched.
            return f(self, *args, **kwargs)

        _model, contents, model_name = prepare_generate_content_args(args, kwargs)

        # Outer span covering the whole streamed agent interaction.
        span = get_start_span_function()(
            op=OP.GEN_AI_INVOKE_AGENT,
            name="invoke_agent",
            origin=ORIGIN,
        )
        span.__enter__()
        span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)
        span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
        set_span_data_for_request(span, integration, model_name, contents, kwargs)
        span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)

        # Inner span for the actual model chat call.
        chat_span = sentry_sdk.start_span(
            op=OP.GEN_AI_CHAT,
            name=f"chat {model_name}",
            origin=ORIGIN,
        )
        chat_span.__enter__()
        chat_span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")
        chat_span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM)
        chat_span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
        set_span_data_for_request(chat_span, integration, model_name, contents, kwargs)
        chat_span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)

        try:
            stream = f(self, *args, **kwargs)

            # Wrapper iterator that accumulates chunks so the final response
            # data can be attached to both spans once the stream ends.
            def new_iterator():
                # type: () -> Iterator[Any]
                chunks = []  # type: List[Any]
                try:
                    for chunk in stream:
                        chunks.append(chunk)
                        yield chunk
                except Exception as exc:
                    _capture_exception(exc)
                    chat_span.set_status(SPANSTATUS.ERROR)
                    raise
                finally:
                    # Attach accumulated response data and close both
                    # manually-entered spans regardless of how the stream
                    # terminated.
                    if chunks:
                        accumulated_response = accumulate_streaming_response(chunks)
                        set_span_data_for_streaming_response(
                            chat_span, integration, accumulated_response
                        )
                        set_span_data_for_streaming_response(
                            span, integration, accumulated_response
                        )
                    chat_span.__exit__(None, None, None)
                    span.__exit__(None, None, None)

            return new_iterator()

        except Exception as exc:
            # The initial call itself failed: record the error status (as the
            # non-streaming wrapper and the inner iterator do) and close both
            # spans before re-raising.
            _capture_exception(exc)
            chat_span.set_status(SPANSTATUS.ERROR)
            chat_span.__exit__(None, None, None)
            span.__exit__(None, None, None)
            raise

    return new_generate_content_stream
133+
134+
135+
def _wrap_async_generate_content_stream(f):
    # type: (Callable[..., Any]) -> Callable[..., Any]
    """Wrap the async ``AsyncModels.generate_content_stream`` with Sentry spans.

    Async counterpart of ``_wrap_generate_content_stream``: an outer
    "invoke_agent" span and a nested "chat" span are entered manually and
    stay open until the returned async iterator is exhausted (closed in its
    ``finally``), or are closed in the outer ``except`` if the initial call
    itself fails.
    """

    @wraps(f)
    async def new_async_generate_content_stream(self, *args, **kwargs):
        # type: (Any, Any, Any) -> Any
        integration = sentry_sdk.get_client().get_integration(GoogleGenAIIntegration)
        if integration is None:
            # Integration disabled: call through untouched.
            return await f(self, *args, **kwargs)

        _model, contents, model_name = prepare_generate_content_args(args, kwargs)

        # Outer span covering the whole streamed agent interaction.
        span = get_start_span_function()(
            op=OP.GEN_AI_INVOKE_AGENT,
            name="invoke_agent",
            origin=ORIGIN,
        )
        span.__enter__()
        span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)
        span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
        set_span_data_for_request(span, integration, model_name, contents, kwargs)
        span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)

        # Inner span for the actual model chat call.
        chat_span = sentry_sdk.start_span(
            op=OP.GEN_AI_CHAT,
            name=f"chat {model_name}",
            origin=ORIGIN,
        )
        chat_span.__enter__()
        chat_span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")
        chat_span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM)
        chat_span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
        set_span_data_for_request(chat_span, integration, model_name, contents, kwargs)
        chat_span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)

        try:
            stream = await f(self, *args, **kwargs)

            # Wrapper async iterator that accumulates chunks so the final
            # response data can be attached to both spans once the stream
            # ends.
            async def new_async_iterator():
                # type: () -> AsyncIterator[Any]
                chunks = []  # type: List[Any]
                try:
                    async for chunk in stream:
                        chunks.append(chunk)
                        yield chunk
                except Exception as exc:
                    _capture_exception(exc)
                    chat_span.set_status(SPANSTATUS.ERROR)
                    raise
                finally:
                    # Attach accumulated response data and close both
                    # manually-entered spans regardless of how the stream
                    # terminated.
                    if chunks:
                        accumulated_response = accumulate_streaming_response(chunks)
                        set_span_data_for_streaming_response(
                            chat_span, integration, accumulated_response
                        )
                        set_span_data_for_streaming_response(
                            span, integration, accumulated_response
                        )
                    chat_span.__exit__(None, None, None)
                    span.__exit__(None, None, None)

            return new_async_iterator()

        except Exception as exc:
            # The initial call itself failed: record the error status (as the
            # non-streaming wrapper and the inner iterator do) and close both
            # spans before re-raising.
            _capture_exception(exc)
            chat_span.set_status(SPANSTATUS.ERROR)
            chat_span.__exit__(None, None, None)
            span.__exit__(None, None, None)
            raise

    return new_async_generate_content_stream
206+
207+
208+
def _wrap_generate_content(f):
    # type: (Callable[..., Any]) -> Callable[..., Any]
    """Wrap the synchronous ``Models.generate_content`` with Sentry spans.

    Creates an outer "invoke_agent" span and a nested "chat" span around the
    wrapped call; request data is attached to both before the call and
    response data after it. Exceptions are captured, mark the chat span as
    errored, and are re-raised unchanged.
    """

    @wraps(f)
    def new_generate_content(self, *args, **kwargs):
        # type: (Any, Any, Any) -> Any
        integration = sentry_sdk.get_client().get_integration(GoogleGenAIIntegration)
        if integration is None:
            # Integration disabled: call through untouched.
            return f(self, *args, **kwargs)

        # The model object itself is unused here; `_model` matches the
        # naming used in the streaming wrappers.
        _model, contents, model_name = prepare_generate_content_args(args, kwargs)

        with get_start_span_function()(
            op=OP.GEN_AI_INVOKE_AGENT,
            name="invoke_agent",
            origin=ORIGIN,
        ) as span:
            span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)
            span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
            set_span_data_for_request(span, integration, model_name, contents, kwargs)

            with sentry_sdk.start_span(
                op=OP.GEN_AI_CHAT,
                name=f"chat {model_name}",
                origin=ORIGIN,
            ) as chat_span:
                chat_span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")
                chat_span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM)
                chat_span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
                set_span_data_for_request(
                    chat_span, integration, model_name, contents, kwargs
                )

                try:
                    response = f(self, *args, **kwargs)
                except Exception as exc:
                    _capture_exception(exc)
                    chat_span.set_status(SPANSTATUS.ERROR)
                    raise

                set_span_data_for_response(chat_span, integration, response)
                set_span_data_for_response(span, integration, response)

                return response

    return new_generate_content
253+
254+
255+
def _wrap_async_generate_content(f):
    # type: (Callable[..., Any]) -> Callable[..., Any]
    """Wrap the async ``AsyncModels.generate_content`` with Sentry spans.

    Async counterpart of ``_wrap_generate_content``: an outer "invoke_agent"
    span and a nested "chat" span surround the awaited call; request data is
    attached before the call and response data after it. Exceptions are
    captured, mark the chat span as errored, and are re-raised unchanged.
    """

    @wraps(f)
    async def new_async_generate_content(self, *args, **kwargs):
        # type: (Any, Any, Any) -> Any
        integration = sentry_sdk.get_client().get_integration(GoogleGenAIIntegration)
        if integration is None:
            # Integration disabled: call through untouched.
            return await f(self, *args, **kwargs)

        # The model object itself is unused here; `_model` matches the
        # naming used in the streaming wrappers.
        _model, contents, model_name = prepare_generate_content_args(args, kwargs)

        with get_start_span_function()(
            op=OP.GEN_AI_INVOKE_AGENT,
            name="invoke_agent",
            origin=ORIGIN,
        ) as span:
            span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)
            span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
            set_span_data_for_request(span, integration, model_name, contents, kwargs)

            with sentry_sdk.start_span(
                op=OP.GEN_AI_CHAT,
                name=f"chat {model_name}",
                origin=ORIGIN,
            ) as chat_span:
                chat_span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")
                chat_span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM)
                chat_span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
                set_span_data_for_request(
                    chat_span, integration, model_name, contents, kwargs
                )
                try:
                    response = await f(self, *args, **kwargs)
                except Exception as exc:
                    _capture_exception(exc)
                    chat_span.set_status(SPANSTATUS.ERROR)
                    raise

                set_span_data_for_response(chat_span, integration, response)
                set_span_data_for_response(span, integration, response)

                return response

    return new_async_generate_content
Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,16 @@
1+
IDENTIFIER = "google_genai"
ORIGIN = f"auto.ai.{IDENTIFIER}"

# Value reported as gen_ai.system on spans for Gemini models.
GEN_AI_SYSTEM = "gcp.gemini"

# Human-readable descriptions for every built-in tool the Google GenAI API
# can expose on a request's tool configuration.
TOOL_ATTRIBUTES_MAP = {
    "google_search_retrieval": "Google Search retrieval tool",
    "google_search": "Google Search tool",
    "retrieval": "Retrieval tool",
    "enterprise_web_search": "Enterprise web search tool",
    "google_maps": "Google Maps tool",
    "code_execution": "Code execution tool",
    "computer_use": "Computer use tool",
}

0 commit comments

Comments
 (0)