Skip to content

Commit 1fe97c9

Browse files
Commit message: updated tests
1 parent f864566 · commit 1fe97c9

File tree

3 files changed

+28
-21
lines changed

3 files changed

+28
-21
lines changed

sentry_sdk/ai/monitoring.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -128,5 +128,8 @@ def record_token_usage(
128128
output_tokens_reasoning,
129129
)
130130

131+
if total_tokens is None and input_tokens is not None and output_tokens is not None:
132+
total_tokens = input_tokens + output_tokens
133+
131134
if total_tokens is not None:
132135
span.set_data(SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS, total_tokens)

sentry_sdk/integrations/openai.py

Lines changed: 9 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -366,22 +366,26 @@ def _new_embeddings_create_common(f, *args, **kwargs):
366366

367367
response = yield f, args, kwargs
368368

369-
prompt_tokens = 0
369+
input_tokens = 0
370370
total_tokens = 0
371371
if hasattr(response, "usage"):
372372
if hasattr(response.usage, "prompt_tokens") and isinstance(
373373
response.usage.prompt_tokens, int
374374
):
375-
prompt_tokens = response.usage.prompt_tokens
375+
input_tokens = response.usage.prompt_tokens
376376
if hasattr(response.usage, "total_tokens") and isinstance(
377377
response.usage.total_tokens, int
378378
):
379379
total_tokens = response.usage.total_tokens
380380

381-
if prompt_tokens == 0:
382-
prompt_tokens = integration.count_tokens(kwargs["input"] or "")
381+
if input_tokens == 0:
382+
input_tokens = integration.count_tokens(kwargs["input"] or "")
383383

384-
record_token_usage(span, prompt_tokens, None, total_tokens or prompt_tokens)
384+
record_token_usage(
385+
span,
386+
input_tokens=input_tokens,
387+
total_tokens=total_tokens or input_tokens,
388+
)
385389

386390
return response
387391

tests/integrations/openai/test_openai.py

Lines changed: 16 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -90,9 +90,9 @@ def test_nonstreaming_chat_completion(
9090
assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
9191
assert SPANDATA.AI_RESPONSES not in span["data"]
9292

93-
assert span["measurements"]["ai_completion_tokens_used"]["value"] == 10
94-
assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 20
95-
assert span["measurements"]["ai_total_tokens_used"]["value"] == 30
93+
assert span["data"]["gen_ai.usage.output_tokens"] == 10
94+
assert span["data"]["gen_ai.usage.input_tokens"] == 20
95+
assert span["data"]["gen_ai.usage.total_tokens"] == 30
9696

9797

9898
@pytest.mark.asyncio
@@ -132,9 +132,9 @@ async def test_nonstreaming_chat_completion_async(
132132
assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
133133
assert SPANDATA.AI_RESPONSES not in span["data"]
134134

135-
assert span["measurements"]["ai_completion_tokens_used"]["value"] == 10
136-
assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 20
137-
assert span["measurements"]["ai_total_tokens_used"]["value"] == 30
135+
assert span["data"]["gen_ai.usage.output_tokens"] == 10
136+
assert span["data"]["gen_ai.usage.input_tokens"] == 20
137+
assert span["data"]["gen_ai.usage.total_tokens"] == 30
138138

139139

140140
def tiktoken_encoding_if_installed():
@@ -228,9 +228,9 @@ def test_streaming_chat_completion(
228228
try:
229229
import tiktoken # type: ignore # noqa # pylint: disable=unused-import
230230

231-
assert span["measurements"]["ai_completion_tokens_used"]["value"] == 2
232-
assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 1
233-
assert span["measurements"]["ai_total_tokens_used"]["value"] == 3
231+
assert span["data"]["gen_ai.usage.output_tokens"] == 2
232+
assert span["data"]["gen_ai.usage.input_tokens"] == 1
233+
assert span["data"]["gen_ai.usage.total_tokens"] == 3
234234
except ImportError:
235235
pass # if tiktoken is not installed, we can't guarantee token usage will be calculated properly
236236

@@ -324,9 +324,9 @@ async def test_streaming_chat_completion_async(
324324
try:
325325
import tiktoken # type: ignore # noqa # pylint: disable=unused-import
326326

327-
assert span["measurements"]["ai_completion_tokens_used"]["value"] == 2
328-
assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 1
329-
assert span["measurements"]["ai_total_tokens_used"]["value"] == 3
327+
assert span["data"]["gen_ai.usage.output_tokens"] == 2
328+
assert span["data"]["gen_ai.usage.input_tokens"] == 1
329+
assert span["data"]["gen_ai.usage.total_tokens"] == 3
330330
except ImportError:
331331
pass # if tiktoken is not installed, we can't guarantee token usage will be calculated properly
332332

@@ -409,8 +409,8 @@ def test_embeddings_create(
409409
else:
410410
assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
411411

412-
assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 20
413-
assert span["measurements"]["ai_total_tokens_used"]["value"] == 30
412+
assert span["data"]["gen_ai.usage.input_tokens"] == 20
413+
assert span["data"]["gen_ai.usage.total_tokens"] == 30
414414

415415

416416
@pytest.mark.asyncio
@@ -457,8 +457,8 @@ async def test_embeddings_create_async(
457457
else:
458458
assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
459459

460-
assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 20
461-
assert span["measurements"]["ai_total_tokens_used"]["value"] == 30
460+
assert span["data"]["gen_ai.usage.input_tokens"] == 20
461+
assert span["data"]["gen_ai.usage.total_tokens"] == 30
462462

463463

464464
@pytest.mark.parametrize(

0 commit comments

Comments (0)