Commit 287e71e

update attr names everywhere

1 parent 9b0629d commit 287e71e

5 files changed (+51, -51 lines changed)
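The diffs below are a mechanical rename of the token-usage span attributes from underscore-suffixed keys to dotted keys. As a rough sketch of the mapping (only the string values come from the diffs; the dictionary and its name are illustrative, not part of the SDK):

```python
# Hypothetical summary of the old -> new span-data keys touched by this commit.
# The key/value strings are taken from the diffs; the dict itself is only
# illustrative and does not exist in the SDK source.
TOKEN_ATTR_RENAMES = {
    "ai.prompt_tokens_used": "ai.prompt_tokens.used",
    "ai.completion_tokens_used": "ai.completion_tokens.used",
    "ai.total_tokens_used": "ai.total_tokens.used",
}
```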

tests/integrations/anthropic/test_anthropic.py

Lines changed: 21 additions & 21 deletions

@@ -127,9 +127,9 @@ def test_nonstreaming_create_message(
         assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
         assert SPANDATA.AI_RESPONSES not in span["data"]

-    assert span["data"]["ai.prompt_tokens_used"] == 10
-    assert span["data"]["ai.completion_tokens_used"] == 20
-    assert span["data"]["ai.total_tokens_used"] == 30
+    assert span["data"]["ai.prompt_tokens.used"] == 10
+    assert span["data"]["ai.completion_tokens.used"] == 20
+    assert span["data"]["ai.total_tokens.used"] == 30
     assert span["data"]["ai.streaming"] is False

@@ -197,9 +197,9 @@ async def test_nonstreaming_create_message_async(
         assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
         assert SPANDATA.AI_RESPONSES not in span["data"]

-    assert span["data"]["ai.prompt_tokens_used"] == 10
-    assert span["data"]["ai.completion_tokens_used"] == 20
-    assert span["data"]["ai.total_tokens_used"] == 30
+    assert span["data"]["ai.prompt_tokens.used"] == 10
+    assert span["data"]["ai.completion_tokens.used"] == 20
+    assert span["data"]["ai.total_tokens.used"] == 30
     assert span["data"]["ai.streaming"] is False

@@ -299,9 +299,9 @@ def test_streaming_create_message(
         assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
         assert SPANDATA.AI_RESPONSES not in span["data"]

-    assert span["data"]["ai.prompt_tokens_used"] == 10
-    assert span["data"]["ai.completion_tokens_used"] == 30
-    assert span["data"]["ai.total_tokens_used"] == 40
+    assert span["data"]["ai.prompt_tokens.used"] == 10
+    assert span["data"]["ai.completion_tokens.used"] == 30
+    assert span["data"]["ai.total_tokens.used"] == 40
     assert span["data"]["ai.streaming"] is True

@@ -404,9 +404,9 @@ async def test_streaming_create_message_async(
         assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
         assert SPANDATA.AI_RESPONSES not in span["data"]

-    assert span["data"]["ai.prompt_tokens_used"] == 10
-    assert span["data"]["ai.completion_tokens_used"] == 30
-    assert span["data"]["ai.total_tokens_used"] == 40
+    assert span["data"]["ai.prompt_tokens.used"] == 10
+    assert span["data"]["ai.completion_tokens.used"] == 30
+    assert span["data"]["ai.total_tokens.used"] == 40
     assert span["data"]["ai.streaming"] is True

@@ -536,9 +536,9 @@ def test_streaming_create_message_with_input_json_delta(
         assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
         assert SPANDATA.AI_RESPONSES not in span["data"]

-    assert span["data"]["ai.prompt_tokens_used"] == 366
-    assert span["data"]["ai.completion_tokens_used"] == 51
-    assert span["data"]["ai.total_tokens_used"] == 417
+    assert span["data"]["ai.prompt_tokens.used"] == 366
+    assert span["data"]["ai.completion_tokens.used"] == 51
+    assert span["data"]["ai.total_tokens.used"] == 417
     assert span["data"]["ai.streaming"] is True

@@ -675,9 +675,9 @@ async def test_streaming_create_message_with_input_json_delta_async(
         assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
         assert SPANDATA.AI_RESPONSES not in span["data"]

-    assert span["data"]["ai.prompt_tokens_used"] == 366
-    assert span["data"]["ai.completion_tokens_used"] == 51
-    assert span["data"]["ai.total_tokens_used"] == 417
+    assert span["data"]["ai.prompt_tokens.used"] == 366
+    assert span["data"]["ai.completion_tokens.used"] == 51
+    assert span["data"]["ai.total_tokens.used"] == 417
     assert span["data"]["ai.streaming"] is True

@@ -831,6 +831,6 @@ def test_add_ai_data_to_span_with_input_json_delta(sentry_init, capture_events):
         [{"type": "text", "text": "{'test': 'data','more': 'json'}"}]
     )
     assert span["data"]["ai.streaming"] is True
-    assert span["data"]["ai.prompt_tokens_used"] == 10
-    assert span["data"]["ai.completion_tokens_used"] == 20
-    assert span["data"]["ai.total_tokens_used"] == 30
+    assert span["data"]["ai.prompt_tokens.used"] == 10
+    assert span["data"]["ai.completion_tokens.used"] == 20
+    assert span["data"]["ai.total_tokens.used"] == 30

tests/integrations/cohere/test_cohere.py

Lines changed: 8 additions & 8 deletions

@@ -64,9 +64,9 @@ def test_nonstreaming_chat(
         assert "ai.input_messages" not in span["data"]
         assert "ai.responses" not in span["data"]

-    assert span["data"]["ai.completion_tokens_used"] == 10
-    assert span["data"]["ai.prompt_tokens_used"] == 20
-    assert span["data"]["ai.total_tokens_used"] == 30
+    assert span["data"]["ai.completion_tokens.used"] == 10
+    assert span["data"]["ai.prompt_tokens.used"] == 20
+    assert span["data"]["ai.total_tokens.used"] == 30


 # noinspection PyTypeChecker
@@ -136,9 +136,9 @@ def test_streaming_chat(sentry_init, capture_events, send_default_pii, include_p
         assert "ai.input_messages" not in span["data"]
         assert "ai.responses" not in span["data"]

-    assert span["data"]["ai.completion_tokens_used"] == 10
-    assert span["data"]["ai.prompt_tokens_used"] == 20
-    assert span["data"]["ai.total_tokens_used"] == 30
+    assert span["data"]["ai.completion_tokens.used"] == 10
+    assert span["data"]["ai.prompt_tokens.used"] == 20
+    assert span["data"]["ai.total_tokens.used"] == 30


 def test_bad_chat(sentry_init, capture_events):
@@ -200,8 +200,8 @@ def test_embed(sentry_init, capture_events, send_default_pii, include_prompts):
     else:
         assert "ai.input_messages" not in span["data"]

-    assert span["data"]["ai.prompt_tokens_used"] == 10
-    assert span["data"]["ai.total_tokens_used"] == 10
+    assert span["data"]["ai.prompt_tokens.used"] == 10
+    assert span["data"]["ai.total_tokens.used"] == 10


 def test_span_origin_chat(sentry_init, capture_events):

tests/integrations/huggingface_hub/test_huggingface_hub.py

Lines changed: 2 additions & 2 deletions

@@ -74,7 +74,7 @@ def test_nonstreaming_chat_completion(
         assert "ai.responses" not in span["data"]

     if details_arg:
-        assert span["data"]["ai.total_tokens_used"] == 10
+        assert span["data"]["ai.total_tokens.used"] == 10


 @pytest.mark.parametrize(
@@ -133,7 +133,7 @@ def test_streaming_chat_completion(
         assert "ai.responses" not in span["data"]

     if details_arg:
-        assert span["data"]["ai.total_tokens_used"] == 10
+        assert span["data"]["ai.total_tokens.used"] == 10


 def test_bad_chat_completion(sentry_init, capture_events):

tests/integrations/langchain/test_langchain.py

Lines changed: 4 additions & 4 deletions

@@ -179,13 +179,13 @@ def test_langchain_agent(
     assert len(list(x for x in tx["spans"] if x["op"] == "ai.run.langchain")) > 0

     if use_unknown_llm_type:
-        assert "ai.prompt_tokens_used" in chat_spans[0]["data"]
-        assert "ai.total_tokens_used" in chat_spans[0]["data"]
+        assert "ai.prompt_tokens.used" in chat_spans[0]["data"]
+        assert "ai.total_tokens.used" in chat_spans[0]["data"]
     else:
         # important: to avoid double counting, we do *not* measure
         # tokens used if we have an explicit integration (e.g. OpenAI)
-        assert "ai.prompt_tokens_used" not in chat_spans[0]["data"]
-        assert "ai.total_tokens_used" not in chat_spans[0]["data"]
+        assert "ai.prompt_tokens.used" not in chat_spans[0]["data"]
+        assert "ai.total_tokens.used" not in chat_spans[0]["data"]

     if send_default_pii and include_prompts:
         assert "You are very powerful" in chat_spans[0]["data"]["ai.input_messages"]

tests/integrations/openai/test_openai.py

Lines changed: 16 additions & 16 deletions

@@ -89,9 +89,9 @@ def test_nonstreaming_chat_completion(
         assert "ai.input_messages" not in span["data"]
         assert "ai.responses" not in span["data"]

-    assert span["data"]["ai.completion_tokens_used"] == 10
-    assert span["data"]["ai.prompt_tokens_used"] == 20
-    assert span["data"]["ai.total_tokens_used"] == 30
+    assert span["data"]["ai.completion_tokens.used"] == 10
+    assert span["data"]["ai.prompt_tokens.used"] == 20
+    assert span["data"]["ai.total_tokens.used"] == 30


 @pytest.mark.asyncio
@@ -131,9 +131,9 @@ async def test_nonstreaming_chat_completion_async(
         assert "ai.input_messages" not in span["data"]
         assert "ai.responses" not in span["data"]

-    assert span["data"]["ai.completion_tokens_used"] == 10
-    assert span["data"]["ai.prompt_tokens_used"] == 20
-    assert span["data"]["ai.total_tokens_used"] == 30
+    assert span["data"]["ai.completion_tokens.used"] == 10
+    assert span["data"]["ai.prompt_tokens.used"] == 20
+    assert span["data"]["ai.total_tokens.used"] == 30


 def tiktoken_encoding_if_installed():
@@ -227,9 +227,9 @@ def test_streaming_chat_completion(
     try:
         import tiktoken # type: ignore # noqa # pylint: disable=unused-import

-        assert span["data"]["ai.completion_tokens_used"] == 2
-        assert span["data"]["ai.prompt_tokens_used"] == 1
-        assert span["data"]["ai.total_tokens_used"] == 3
+        assert span["data"]["ai.completion_tokens.used"] == 2
+        assert span["data"]["ai.prompt_tokens.used"] == 1
+        assert span["data"]["ai.total_tokens.used"] == 3
     except ImportError:
         pass # if tiktoken is not installed, we can't guarantee token usage will be calculated properly

@@ -323,9 +323,9 @@ async def test_streaming_chat_completion_async(
     try:
         import tiktoken # type: ignore # noqa # pylint: disable=unused-import

-        assert span["data"]["ai.completion_tokens_used"] == 2
-        assert span["data"]["ai.prompt_tokens_used"] == 1
-        assert span["data"]["ai.total_tokens_used"] == 3
+        assert span["data"]["ai.completion_tokens.used"] == 2
+        assert span["data"]["ai.prompt_tokens.used"] == 1
+        assert span["data"]["ai.total_tokens.used"] == 3
     except ImportError:
         pass # if tiktoken is not installed, we can't guarantee token usage will be calculated properly

@@ -409,8 +409,8 @@ def test_embeddings_create(
     else:
         assert "ai.input_messages" not in span["data"]

-    assert span["data"]["ai.prompt_tokens_used"] == 20
-    assert span["data"]["ai.total_tokens_used"] == 30
+    assert span["data"]["ai.prompt_tokens.used"] == 20
+    assert span["data"]["ai.total_tokens.used"] == 30


 @pytest.mark.asyncio
@@ -457,8 +457,8 @@ async def test_embeddings_create_async(
     else:
         assert "ai.input_messages" not in span["data"]

-    assert span["data"]["ai.prompt_tokens_used"] == 20
-    assert span["data"]["ai.total_tokens_used"] == 30
+    assert span["data"]["ai.prompt_tokens.used"] == 20
+    assert span["data"]["ai.total_tokens.used"] == 30


 @pytest.mark.forked
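Every updated test asserts the same dotted keys against span["data"]. A small helper like the following, a minimal sketch that is not part of this commit, could centralize those assertions; the helper name and signature are assumptions:

```python
# Hypothetical test helper (not part of this commit): checks the renamed
# token-usage keys on a captured span's "data" dict.
def assert_token_usage(span, prompt=None, completion=None, total=None):
    data = span["data"]
    if prompt is not None:
        assert data["ai.prompt_tokens.used"] == prompt
    if completion is not None:
        assert data["ai.completion_tokens.used"] == completion
    if total is not None:
        assert data["ai.total_tokens.used"] == total


# Example usage inside a test:
# assert_token_usage(span, prompt=10, completion=20, total=30)
```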
