Skip to content

Commit 6c1c97d

Browse files
committed
Moved duplicated code into functions
1 parent a175ba1 commit 6c1c97d

File tree

1 file changed

+63
-69
lines changed

1 file changed

+63
-69
lines changed

sentry_sdk/integrations/anthropic.py

Lines changed: 63 additions & 69 deletions
Original file line numberDiff line numberDiff line change
@@ -75,7 +75,9 @@ def _calculate_token_usage(result, span):
7575

7676
def _get_responses(content):
7777
# type: (list[Any]) -> list[dict[str, Any]]
78-
"""Get JSON of a Anthropic responses."""
78+
"""
79+
Get JSON of Anthropic responses.
80+
"""
7981
responses = []
8082
for item in content:
8183
if hasattr(item, "text"):
@@ -88,11 +90,48 @@ def _get_responses(content):
8890
return responses
8991

9092

93+
def _collect_ai_data(event, input_tokens, output_tokens, content_blocks):
    # type: (Any, int, int, list[str]) -> tuple[int, int]
    """
    Count token usage and collect content blocks from one AI stream event.

    ``content_blocks`` is a list and is mutated in place. The token counts,
    however, are plain ``int``s: rebinding them with ``+=`` inside this
    function is invisible to the caller, so the updated counts MUST be
    returned explicitly — callers should use
    ``input_tokens, output_tokens = _collect_ai_data(...)`` rather than
    relying on in-place mutation (which silently loses all token usage).

    :param event: a streamed Anthropic event; only acted on if it has a
        ``type`` attribute.
    :param input_tokens: running count of input tokens so far.
    :param output_tokens: running count of output tokens so far.
    :param content_blocks: accumulator for streamed text fragments.
    :return: the updated ``(input_tokens, output_tokens)`` pair.
    """
    with capture_internal_exceptions():
        if hasattr(event, "type"):
            if event.type == "message_start":
                usage = event.message.usage
                input_tokens += usage.input_tokens
                output_tokens += usage.output_tokens
            elif event.type == "content_block_start":
                pass
            elif event.type == "content_block_delta":
                if hasattr(event.delta, "text"):
                    content_blocks.append(event.delta.text)
            elif event.type == "content_block_stop":
                pass
            elif event.type == "message_delta":
                output_tokens += event.usage.output_tokens

    return input_tokens, output_tokens
112+
113+
114+
def _add_ai_data_to_span(
    span, integration, content_blocks, input_tokens, output_tokens
):
    # type: (Any, Any, list[str], int, int) -> None
    """
    Record streamed-response data on the span: the assembled response
    text (only when PII capture is permitted), the token usage, and the
    streaming flag.
    """
    with capture_internal_exceptions():
        # Attach the raw response text only when the SDK may send PII
        # and the integration opted in to prompt capture.
        if should_send_default_pii() and integration.include_prompts:
            joined_text = "".join(content_blocks)
            responses = [{"type": "text", "text": joined_text}]
            span.set_data(SPANDATA.AI_RESPONSES, responses)
        record_token_usage(
            span, input_tokens, output_tokens, input_tokens + output_tokens
        )
        span.set_data(SPANDATA.AI_STREAMING, True)
130+
131+
91132
def _sentry_patched_create_common(f, *args, **kwargs):
92133
# type: (Any, *Any, **Any) -> Any
93-
94-
# check requirements
95-
integration = sentry_sdk.get_client().get_integration(AnthropicIntegration)
134+
integration = kwargs.pop("integration")
96135
if integration is None:
97136
return f(*args, **kwargs)
98137

@@ -104,15 +143,13 @@ def _sentry_patched_create_common(f, *args, **kwargs):
104143
except TypeError:
105144
return f(*args, **kwargs)
106145

107-
# start span
108146
span = sentry_sdk.start_span(
109147
op=OP.ANTHROPIC_MESSAGES_CREATE,
110148
description="Anthropic messages create",
111149
origin=AnthropicIntegration.origin,
112150
)
113151
span.__enter__()
114152

115-
# yield generator
116153
try:
117154
result = yield f, args, kwargs
118155
except Exception as exc:
@@ -130,7 +167,7 @@ def _sentry_patched_create_common(f, *args, **kwargs):
130167

131168
if should_send_default_pii() and integration.include_prompts:
132169
span.set_data(SPANDATA.AI_INPUT_MESSAGES, messages)
133-
170+
134171
if hasattr(result, "content"):
135172
if should_send_default_pii() and integration.include_prompts:
136173
span.set_data(SPANDATA.AI_RESPONSES, _get_responses(result.content))
@@ -140,77 +177,36 @@ def _sentry_patched_create_common(f, *args, **kwargs):
140177
elif hasattr(result, "_iterator"):
141178
old_iterator = result._iterator
142179

143-
async def new_iterator_async():
144-
# type: () -> AsyncIterator[MessageStreamEvent]
180+
def new_iterator():
181+
# type: () -> Iterator[MessageStreamEvent]
145182
input_tokens = 0
146183
output_tokens = 0
147184
content_blocks = []
148-
with capture_internal_exceptions():
149-
async for event in old_iterator:
150-
if hasattr(event, "type"):
151-
if event.type == "message_start":
152-
usage = event.message.usage
153-
input_tokens += usage.input_tokens
154-
output_tokens += usage.output_tokens
155-
elif event.type == "content_block_start":
156-
pass
157-
elif event.type == "content_block_delta":
158-
if hasattr(event.delta, "text"):
159-
content_blocks.append(event.delta.text)
160-
elif event.type == "content_block_stop":
161-
pass
162-
elif event.type == "message_delta":
163-
output_tokens += event.usage.output_tokens
164-
elif event.type == "message_stop":
165-
continue
166185

186+
for event in old_iterator:
187+
_collect_ai_data(event, input_tokens, output_tokens, content_blocks)
188+
if event.type != "message_stop":
167189
yield event
168190

169-
if should_send_default_pii() and integration.include_prompts:
170-
complete_message = "".join(content_blocks)
171-
span.set_data(
172-
SPANDATA.AI_RESPONSES,
173-
[{"type": "text", "text": complete_message}],
174-
)
175-
total_tokens = input_tokens + output_tokens
176-
record_token_usage(span, input_tokens, output_tokens, total_tokens)
177-
span.set_data(SPANDATA.AI_STREAMING, True)
191+
_add_ai_data_to_span(
192+
span, integration, content_blocks, input_tokens, output_tokens
193+
)
178194
span.__exit__(None, None, None)
179195

180-
def new_iterator():
181-
# type: () -> Iterator[MessageStreamEvent]
196+
async def new_iterator_async():
197+
# type: () -> AsyncIterator[MessageStreamEvent]
182198
input_tokens = 0
183199
output_tokens = 0
184200
content_blocks = []
185-
with capture_internal_exceptions():
186-
for event in old_iterator:
187-
if hasattr(event, "type"):
188-
if event.type == "message_start":
189-
usage = event.message.usage
190-
input_tokens += usage.input_tokens
191-
output_tokens += usage.output_tokens
192-
elif event.type == "content_block_start":
193-
pass
194-
elif event.type == "content_block_delta":
195-
if hasattr(event.delta, "text"):
196-
content_blocks.append(event.delta.text)
197-
elif event.type == "content_block_stop":
198-
pass
199-
elif event.type == "message_delta":
200-
output_tokens += event.usage.output_tokens
201-
elif event.type == "message_stop":
202-
continue
201+
202+
async for event in old_iterator:
203+
_collect_ai_data(event, input_tokens, output_tokens, content_blocks)
204+
if event.type != "message_stop":
203205
yield event
204206

205-
if should_send_default_pii() and integration.include_prompts:
206-
complete_message = "".join(content_blocks)
207-
span.set_data(
208-
SPANDATA.AI_RESPONSES,
209-
[{"type": "text", "text": complete_message}],
210-
)
211-
total_tokens = input_tokens + output_tokens
212-
record_token_usage(span, input_tokens, output_tokens, total_tokens)
213-
span.set_data(SPANDATA.AI_STREAMING, True)
207+
_add_ai_data_to_span(
208+
span, integration, content_blocks, input_tokens, output_tokens
209+
)
214210
span.__exit__(None, None, None)
215211

216212
if str(type(result._iterator)) == "<class 'async_generator'>":
@@ -246,8 +242,7 @@ def _execute_sync(f, *args, **kwargs):
246242
def _sentry_patched_create_sync(*args, **kwargs):
247243
# type: (*Any, **Any) -> Any
248244
integration = sentry_sdk.get_client().get_integration(AnthropicIntegration)
249-
if integration is None or "messages" not in kwargs:
250-
return f(*args, **kwargs)
245+
kwargs["integration"] = integration
251246

252247
return _execute_sync(f, *args, **kwargs)
253248

@@ -275,8 +270,7 @@ async def _execute_async(f, *args, **kwargs):
275270
async def _sentry_patched_create_async(*args, **kwargs):
276271
# type: (*Any, **Any) -> Any
277272
integration = sentry_sdk.get_client().get_integration(AnthropicIntegration)
278-
if integration is None or "messages" not in kwargs:
279-
return await f(*args, **kwargs)
273+
kwargs["integration"] = integration
280274

281275
return await _execute_async(f, *args, **kwargs)
282276

0 commit comments

Comments
 (0)