Skip to content

Commit 477a03c

Browse files
committed
Merge branch 'antonpirker/openai-otel-2' into antonpirker/openai-pipeline-streaming
2 parents e04174e + eb629de commit 477a03c

File tree

1 file changed

+16
-14
lines changed

1 file changed

+16
-14
lines changed

sentry_sdk/integrations/openai.py

Lines changed: 16 additions & 14 deletions
Original file line number | Diff line number | Diff line change
@@ -32,6 +32,7 @@
3232
try:
3333
# responses API support was introduced in v1.66.0
3434
from openai.resources.responses import Responses, AsyncResponses
35+
from openai.types.responses.response_completed_event import ResponseCompletedEvent
3536
except ImportError:
3637
RESPONSES_API_ENABLED = False
3738

@@ -99,7 +100,7 @@ def _get_usage(usage, names):
99100
def _calculate_token_usage(
100101
messages, response, span, streaming_message_responses, count_tokens
101102
):
102-
# type: (Iterable[ChatCompletionMessageParam], Any, Span, Optional[List[str]], Callable[..., Any]) -> None
103+
# type: (Optional[Iterable[ChatCompletionMessageParam]], Any, Span, Optional[List[str]], Callable[..., Any]) -> None
103104
input_tokens = 0 # type: Optional[int]
104105
input_tokens_cached = 0 # type: Optional[int]
105106
output_tokens = 0 # type: Optional[int]
@@ -125,7 +126,7 @@ def _calculate_token_usage(
125126

126127
# Manually count tokens
127128
if input_tokens == 0:
128-
for message in messages:
129+
for message in messages or []:
129130
if isinstance(message, dict) and "content" in message:
130131
input_tokens += count_tokens(message["content"])
131132
elif isinstance(message, str):
@@ -160,8 +161,8 @@ def _calculate_token_usage(
160161
def _set_input_data(span, kwargs, operation, integration):
161162
# type: (Span, dict[str, Any], str, OpenAIIntegration) -> None
162163
# Input messages (the prompt or data sent to the model)
163-
messages = kwargs.get("messages", [])
164-
if messages == []:
164+
messages = kwargs.get("messages")
165+
if messages is None:
165166
messages = kwargs.get("input")
166167

167168
if isinstance(messages, str):
@@ -195,7 +196,7 @@ def _set_input_data(span, kwargs, operation, integration):
195196
set_data_normalized(span, attribute, value)
196197

197198
# Input attributes: Tools
198-
tools = kwargs.get("tools", [])
199+
tools = kwargs.get("tools")
199200
if tools is not None and len(tools) > 0:
200201
set_data_normalized(
201202
span, SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, safe_serialize(tools)
@@ -209,11 +210,11 @@ def _set_output_data(span, response, kwargs, integration, finish_span=True):
209210

210211
# Input messages (the prompt or data sent to the model)
211212
# used for the token usage calculation
212-
messages = kwargs.get("messages", [])
213-
if messages == []:
213+
messages = kwargs.get("messages")
214+
if messages is None:
214215
messages = kwargs.get("input")
215216

216-
if isinstance(messages, str):
217+
if messages is not None and isinstance(messages, str):
217218
messages = [messages]
218219

219220
if hasattr(response, "choices"):
@@ -225,6 +226,7 @@ def _set_output_data(span, response, kwargs, integration, finish_span=True):
225226
SPANDATA.GEN_AI_RESPONSE_TEXT,
226227
safe_serialize(response_text),
227228
)
229+
_calculate_token_usage(messages, response, span, None, integration.count_tokens)
228230
if finish_span:
229231
span.__exit__(None, None, None)
230232

@@ -237,6 +239,7 @@ def _set_output_data(span, response, kwargs, integration, finish_span=True):
237239
SPANDATA.GEN_AI_RESPONSE_TEXT,
238240
safe_serialize(response_text),
239241
)
242+
_calculate_token_usage(messages, response, span, None, integration.count_tokens)
240243
if finish_span:
241244
span.__exit__(None, None, None)
242245

@@ -270,7 +273,7 @@ def new_iterator():
270273
data_buf[0].append(x.delta or "")
271274

272275
# OpenAI responses API end of streaming response
273-
if x.__class__.__name__ == "ResponseCompletedEvent":
276+
if RESPONSES_API_ENABLED and isinstance(x, ResponseCompletedEvent):
274277
_calculate_token_usage(
275278
messages,
276279
x.response,
@@ -283,7 +286,7 @@ def new_iterator():
283286
yield x
284287

285288
if len(data_buf) > 0:
286-
all_responses = list(map(lambda chunk: "".join(chunk), data_buf))
289+
all_responses = ["".join(chunk) for chunk in data_buf]
287290
if should_send_default_pii() and integration.include_prompts:
288291
set_data_normalized(
289292
span, SPANDATA.GEN_AI_RESPONSE_TEXT, all_responses
@@ -325,7 +328,7 @@ async def new_iterator_async():
325328
data_buf[0].append(x.delta or "")
326329

327330
# OpenAI responses API end of streaming response
328-
if x.__class__.__name__ == "ResponseCompletedEvent":
331+
if RESPONSES_API_ENABLED and isinstance(x, ResponseCompletedEvent):
329332
_calculate_token_usage(
330333
messages,
331334
x.response,
@@ -338,7 +341,7 @@ async def new_iterator_async():
338341
yield x
339342

340343
if len(data_buf) > 0:
341-
all_responses = list(map(lambda chunk: "".join(chunk), data_buf))
344+
all_responses = ["".join(chunk) for chunk in data_buf]
342345
if should_send_default_pii() and integration.include_prompts:
343346
set_data_normalized(
344347
span, SPANDATA.GEN_AI_RESPONSE_TEXT, all_responses
@@ -359,11 +362,10 @@ async def new_iterator_async():
359362
else:
360363
response._iterator = new_iterator()
361364
else:
365+
_calculate_token_usage(messages, response, span, None, integration.count_tokens)
362366
if finish_span:
363367
span.__exit__(None, None, None)
364368

365-
_calculate_token_usage(messages, response, span, None, integration.count_tokens)
366-
367369

368370
def _new_chat_completion_common(f, *args, **kwargs):
369371
# type: (Any, Any, Any) -> Any

0 commit comments

Comments (0)