Skip to content

Commit 8a1b40a

Browse files
committed
Add support for async calls in Anthropic, OpenAI integrations
1 parent 0fb9606 commit 8a1b40a

File tree

2 files changed

+276
-232
lines changed

sentry_sdk/integrations/anthropic.py

Lines changed: 109 additions & 95 deletions
Original file line numberDiff line numberDiff line change
@@ -15,14 +15,13 @@
1515
from typing import TYPE_CHECKING
1616

1717
try:
18-
from anthropic.resources import Messages
18+
from anthropic.resources import Messages, AsyncMessages
1919

2020
if TYPE_CHECKING:
2121
from anthropic.types import MessageStreamEvent
2222
except ImportError:
2323
raise DidNotEnable("Anthropic not installed")
2424

25-
2625
if TYPE_CHECKING:
2726
from typing import Any, Iterator
2827
from sentry_sdk.tracing import Span
@@ -48,6 +47,7 @@ def setup_once():
4847
raise DidNotEnable("anthropic 0.16 or newer required.")
4948

5049
Messages.create = _wrap_message_create(Messages.create)
50+
AsyncMessages.create = _wrap_async_message_create(AsyncMessages.create)
5151

5252

5353
def _capture_exception(exc):
@@ -75,105 +75,119 @@ def _calculate_token_usage(result, span):
7575
record_token_usage(span, input_tokens, output_tokens, total_tokens)
7676

7777

78+
def _sentry_patched_create_common(f, *args, **kwargs):
    # type: (Any, *Any, **Any) -> Any
    """Shared instrumentation for ``Messages.create`` / ``AsyncMessages.create``.

    Opens an ``ANTHROPIC_MESSAGES_CREATE`` span around the wrapped call,
    records prompts/responses (PII-gated) and token usage, and closes the
    span.  Calls without an iterable ``messages`` kwarg pass straight through.

    NOTE(review): this helper invokes ``f`` synchronously; when ``f`` is an
    async method, ``result`` is an un-awaited coroutine, so the ``content`` /
    ``_iterator`` branches are skipped and the span closes as
    ``unknown_response`` — confirm whether async responses should be
    instrumented here.
    """
    # Pass through untouched unless we received an iterable "messages" kwarg.
    if "messages" not in kwargs:
        return f(*args, **kwargs)

    try:
        iter(kwargs["messages"])
    except TypeError:
        return f(*args, **kwargs)

    messages = list(kwargs["messages"])
    model = kwargs.get("model")

    # The span is entered manually: for streaming responses it must stay
    # open until the lazily consumed iterator is exhausted.
    span = sentry_sdk.start_span(
        op=OP.ANTHROPIC_MESSAGES_CREATE,
        description="Anthropic messages create",
        origin=AnthropicIntegration.origin,
    )
    span.__enter__()

    try:
        result = f(*args, **kwargs)
    except Exception as exc:
        _capture_exception(exc)
        span.__exit__(None, None, None)
        raise exc from None

    integration = sentry_sdk.get_client().get_integration(AnthropicIntegration)

    with capture_internal_exceptions():
        span.set_data(SPANDATA.AI_MODEL_ID, model)
        span.set_data(SPANDATA.AI_STREAMING, False)
        if should_send_default_pii() and integration.include_prompts:
            span.set_data(SPANDATA.AI_INPUT_MESSAGES, messages)

        if hasattr(result, "content"):
            # Non-streaming response: record everything and close the span now.
            if should_send_default_pii() and integration.include_prompts:
                span.set_data(
                    SPANDATA.AI_RESPONSES,
                    [
                        {"type": block.type, "text": block.text}
                        for block in result.content
                    ],
                )
            _calculate_token_usage(result, span)
            span.__exit__(None, None, None)
        elif hasattr(result, "_iterator"):
            # Streaming response: swap in a generator that forwards events,
            # accumulates text and token counts, and closes the span only
            # once the stream is fully consumed.
            upstream_iter = result._iterator

            def new_iterator():
                # type: () -> Iterator[MessageStreamEvent]
                input_tokens = 0
                output_tokens = 0
                text_chunks = []
                with capture_internal_exceptions():
                    for event in upstream_iter:
                        kind = getattr(event, "type", None)
                        if kind == "message_start":
                            usage = event.message.usage
                            input_tokens += usage.input_tokens
                            output_tokens += usage.output_tokens
                        elif kind == "content_block_delta":
                            text_chunks.append(event.delta.text)
                        elif kind == "message_delta":
                            output_tokens += event.usage.output_tokens
                        elif kind == "message_stop":
                            # Terminal marker: consumed, never forwarded.
                            continue
                        # Every other event (including content_block_start/
                        # stop and unknown types) is forwarded unchanged.
                        yield event

                    if should_send_default_pii() and integration.include_prompts:
                        span.set_data(
                            SPANDATA.AI_RESPONSES,
                            [{"type": "text", "text": "".join(text_chunks)}],
                        )
                    record_token_usage(
                        span,
                        input_tokens,
                        output_tokens,
                        input_tokens + output_tokens,
                    )
                    span.set_data(SPANDATA.AI_STREAMING, True)
                    span.__exit__(None, None, None)

            result._iterator = new_iterator()
        else:
            # Neither a Message nor a stream — flag it and close the span.
            span.set_data("unknown_response", True)
            span.__exit__(None, None, None)

    return result
172+
173+
78174
def _wrap_message_create(f):
    # type: (Any) -> Any
    """Return a Sentry-instrumented wrapper for the sync ``Messages.create``.

    The wrapper is a no-op pass-through when the Anthropic integration is
    disabled (handled by ``ensure_integration_enabled``).
    """

    @wraps(f)
    @ensure_integration_enabled(AnthropicIntegration, f)
    def _sentry_patched_create_sync(*args, **kwargs):
        # type: (*Any, **Any) -> Any
        # All span/token bookkeeping lives in the shared helper.
        return _sentry_patched_create_common(f, *args, **kwargs)

    return _sentry_patched_create_sync
171183

172-
result._iterator = new_iterator()
173-
else:
174-
span.set_data("unknown_response", True)
175-
span.__exit__(None, None, None)
176184

177-
return result
185+
def _wrap_async_message_create(f):
    # type: (Any) -> Any
    """Return a Sentry-instrumented wrapper for the async ``AsyncMessages.create``.

    The wrapper is a no-op pass-through when the Anthropic integration is
    disabled (handled by ``ensure_integration_enabled``).
    """

    @wraps(f)
    @ensure_integration_enabled(AnthropicIntegration, f)
    async def _sentry_patched_create_async(*args, **kwargs):
        # type: (*Any, **Any) -> Any
        # NOTE(review): the shared helper is synchronous, so it receives the
        # un-awaited coroutine from ``f`` and returns it for awaiting here;
        # the awaited response itself is not inspected — confirm intended.
        return await _sentry_patched_create_common(f, *args, **kwargs)

    return _sentry_patched_create_async

0 commit comments

Comments
 (0)