Skip to content

Commit 6fe2041

Browse files
authored
fix(gen_ai): input and output token description (#261)
1 parent cfc2361 commit 6fe2041

File tree

4 files changed

+10
-10
lines changed

4 files changed

+10
-10
lines changed

javascript/sentry-conventions/src/attributes.ts

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -3028,7 +3028,7 @@ export type GEN_AI_USAGE_COMPLETION_TOKENS_TYPE = number;
30283028
// Path: model/attributes/gen_ai/gen_ai__usage__input_tokens.json
30293029

30303030
/**
3031-
* The number of tokens used to process the AI input (prompt) without cached input tokens. `gen_ai.usage.input_tokens`
3031+
* The number of tokens used to process the AI input (prompt) including cached input tokens. `gen_ai.usage.input_tokens`
30323032
*
30333033
* Attribute Value Type: `number` {@link GEN_AI_USAGE_INPUT_TOKENS_TYPE}
30343034
*
@@ -3090,7 +3090,7 @@ export type GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE_TYPE = number;
30903090
// Path: model/attributes/gen_ai/gen_ai__usage__output_tokens.json
30913091

30923092
/**
3093-
* The number of tokens used for creating the AI output (without reasoning tokens). `gen_ai.usage.output_tokens`
3093+
* The number of tokens used for creating the AI output (including reasoning tokens). `gen_ai.usage.output_tokens`
30943094
*
30953095
* Attribute Value Type: `number` {@link GEN_AI_USAGE_OUTPUT_TOKENS_TYPE}
30963096
*
@@ -11278,7 +11278,7 @@ export const ATTRIBUTE_METADATA: Record<AttributeName, AttributeMetadata> = {
1127811278
aliases: [AI_COMPLETION_TOKENS_USED, GEN_AI_USAGE_OUTPUT_TOKENS],
1127911279
},
1128011280
[GEN_AI_USAGE_INPUT_TOKENS]: {
11281-
brief: 'The number of tokens used to process the AI input (prompt) without cached input tokens.',
11281+
brief: 'The number of tokens used to process the AI input (prompt) including cached input tokens.',
1128211282
type: 'integer',
1128311283
pii: {
1128411284
isPii: 'maybe',
@@ -11306,7 +11306,7 @@ export const ATTRIBUTE_METADATA: Record<AttributeName, AttributeMetadata> = {
1130611306
example: 100,
1130711307
},
1130811308
[GEN_AI_USAGE_OUTPUT_TOKENS]: {
11309-
brief: 'The number of tokens used for creating the AI output (without reasoning tokens).',
11309+
brief: 'The number of tokens used for creating the AI output (including reasoning tokens).',
1131011310
type: 'integer',
1131111311
pii: {
1131211312
isPii: 'maybe',

model/attributes/gen_ai/gen_ai__usage__input_tokens.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
{
22
"key": "gen_ai.usage.input_tokens",
3-
"brief": "The number of tokens used to process the AI input (prompt) without cached input tokens.",
3+
"brief": "The number of tokens used to process the AI input (prompt) including cached input tokens.",
44
"type": "integer",
55
"pii": {
66
"key": "maybe"

model/attributes/gen_ai/gen_ai__usage__output_tokens.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
{
22
"key": "gen_ai.usage.output_tokens",
3-
"brief": "The number of tokens used for creating the AI output (without reasoning tokens).",
3+
"brief": "The number of tokens used for creating the AI output (including reasoning tokens).",
44
"type": "integer",
55
"pii": {
66
"key": "maybe"

python/src/sentry_conventions/attributes.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1772,7 +1772,7 @@ class ATTRIBUTE_NAMES(metaclass=_AttributeNamesMeta):
17721772
GEN_AI_USAGE_INPUT_TOKENS: Literal["gen_ai.usage.input_tokens"] = (
17731773
"gen_ai.usage.input_tokens"
17741774
)
1775-
"""The number of tokens used to process the AI input (prompt) without cached input tokens.
1775+
"""The number of tokens used to process the AI input (prompt) including cached input tokens.
17761776
17771777
Type: int
17781778
Contains PII: maybe
@@ -1809,7 +1809,7 @@ class ATTRIBUTE_NAMES(metaclass=_AttributeNamesMeta):
18091809
GEN_AI_USAGE_OUTPUT_TOKENS: Literal["gen_ai.usage.output_tokens"] = (
18101810
"gen_ai.usage.output_tokens"
18111811
)
1812-
"""The number of tokens used for creating the AI output (without reasoning tokens).
1812+
"""The number of tokens used for creating the AI output (including reasoning tokens).
18131813
18141814
Type: int
18151815
Contains PII: maybe
@@ -6000,7 +6000,7 @@ class ATTRIBUTE_NAMES(metaclass=_AttributeNamesMeta):
60006000
aliases=["ai.completion_tokens.used", "gen_ai.usage.output_tokens"],
60016001
),
60026002
"gen_ai.usage.input_tokens": AttributeMetadata(
6003-
brief="The number of tokens used to process the AI input (prompt) without cached input tokens.",
6003+
brief="The number of tokens used to process the AI input (prompt) including cached input tokens.",
60046004
type=AttributeType.INTEGER,
60056005
pii=PiiInfo(isPii=IsPii.MAYBE),
60066006
is_in_otel=True,
@@ -6022,7 +6022,7 @@ class ATTRIBUTE_NAMES(metaclass=_AttributeNamesMeta):
60226022
example=50,
60236023
),
60246024
"gen_ai.usage.output_tokens": AttributeMetadata(
6025-
brief="The number of tokens used for creating the AI output (without reasoning tokens).",
6025+
brief="The number of tokens used for creating the AI output (including reasoning tokens).",
60266026
type=AttributeType.INTEGER,
60276027
pii=PiiInfo(isPii=IsPii.MAYBE),
60286028
is_in_otel=True,

0 commit comments

Comments (0)