Skip to content

Commit 21e4c47

Browse files
authored
feat(ai): add input tokens cache write (#217)
Add the new attribute `gen_ai.usage.input_tokens.cache_write` to record cache writes.

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
1 parent 8cf36ce commit 21e4c47

File tree

6 files changed

+77
-1
lines changed

6 files changed

+77
-1
lines changed

CHANGELOG.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66

77
#### Changes to attributes
88

9+
- feat(ai): Add gen_ai.usage.input_tokens.cache_write ([#217](https://github.com/getsentry/sentry-conventions/pull/217))
910
- feat(attributes): Add sentry.normalized_db_query.hash ([#200](https://github.com/getsentry/sentry-conventions/pull/200))
1011

1112
## 0.3.1

generated/attributes/all.md

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44

55
This page lists all available attributes across all categories.
66

7-
Total attributes: 413
7+
Total attributes: 414
88

99
## Stable Attributes
1010

@@ -110,6 +110,7 @@ Total attributes: 413
110110
| [`gen_ai.tool.output`](./gen_ai.md#gen_aitooloutput) | The output of the tool being used. It has to be a stringified version of the output of the tool. |
111111
| [`gen_ai.tool.type`](./gen_ai.md#gen_aitooltype) | The type of tool being used. |
112112
| [`gen_ai.usage.input_tokens`](./gen_ai.md#gen_aiusageinput_tokens) | The number of tokens used to process the AI input (prompt) without cached input tokens. |
113+
| [`gen_ai.usage.input_tokens.cache_write`](./gen_ai.md#gen_aiusageinput_tokenscache_write) | The number of tokens written to the cache when processing the AI input (prompt). |
113114
| [`gen_ai.usage.input_tokens.cached`](./gen_ai.md#gen_aiusageinput_tokenscached) | The number of cached tokens used to process the AI input (prompt). |
114115
| [`gen_ai.usage.output_tokens`](./gen_ai.md#gen_aiusageoutput_tokens) | The number of tokens used for creating the AI output (without reasoning tokens). |
115116
| [`gen_ai.usage.output_tokens.reasoning`](./gen_ai.md#gen_aiusageoutput_tokensreasoning) | The number of tokens used for reasoning to create the AI output. |

generated/attributes/gen_ai.md

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,7 @@
3939
- [gen_ai.tool.output](#gen_aitooloutput)
4040
- [gen_ai.tool.type](#gen_aitooltype)
4141
- [gen_ai.usage.input_tokens](#gen_aiusageinput_tokens)
42+
- [gen_ai.usage.input_tokens.cache_write](#gen_aiusageinput_tokenscache_write)
4243
- [gen_ai.usage.input_tokens.cached](#gen_aiusageinput_tokenscached)
4344
- [gen_ai.usage.output_tokens](#gen_aiusageoutput_tokens)
4445
- [gen_ai.usage.output_tokens.reasoning](#gen_aiusageoutput_tokensreasoning)
@@ -462,6 +463,17 @@ The number of tokens used to process the AI input (prompt) without cached input
462463
| Example | `10` |
463464
| Aliases | `ai.prompt_tokens.used`, `gen_ai.usage.prompt_tokens` |
464465

466+
### gen_ai.usage.input_tokens.cache_write
467+
468+
The number of tokens written to the cache when processing the AI input (prompt).
469+
470+
| Property | Value |
471+
| --- | --- |
472+
| Type | `integer` |
473+
| Has PII | false |
474+
| Exists in OpenTelemetry | No |
475+
| Example | `100` |
476+
465477
### gen_ai.usage.input_tokens.cached
466478

467479
The number of cached tokens used to process the AI input (prompt).

javascript/sentry-conventions/src/attributes.ts

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2819,6 +2819,26 @@ export const GEN_AI_USAGE_INPUT_TOKENS_CACHED = 'gen_ai.usage.input_tokens.cache
28192819
*/
28202820
export type GEN_AI_USAGE_INPUT_TOKENS_CACHED_TYPE = number;
28212821

2822+
// Path: model/attributes/gen_ai/gen_ai__usage__input_tokens__cache_write.json
2823+
2824+
/**
2825+
* The number of tokens written to the cache when processing the AI input (prompt). `gen_ai.usage.input_tokens.cache_write`
2826+
*
2827+
* Attribute Value Type: `number` {@link GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE_TYPE}
2828+
*
2829+
* Contains PII: false
2830+
*
2831+
* Attribute defined in OTEL: No
2832+
*
2833+
* @example 100
2834+
*/
2835+
export const GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE = 'gen_ai.usage.input_tokens.cache_write';
2836+
2837+
/**
2838+
* Type for {@link GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE} gen_ai.usage.input_tokens.cache_write
2839+
*/
2840+
export type GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE_TYPE = number;
2841+
28222842
// Path: model/attributes/gen_ai/gen_ai__usage__output_tokens.json
28232843

28242844
/**
@@ -8757,6 +8777,7 @@ export const ATTRIBUTE_TYPE: Record<string, AttributeType> = {
87578777
[GEN_AI_USAGE_COMPLETION_TOKENS]: 'integer',
87588778
[GEN_AI_USAGE_INPUT_TOKENS]: 'integer',
87598779
[GEN_AI_USAGE_INPUT_TOKENS_CACHED]: 'integer',
8780+
[GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE]: 'integer',
87608781
[GEN_AI_USAGE_OUTPUT_TOKENS]: 'integer',
87618782
[GEN_AI_USAGE_OUTPUT_TOKENS_REASONING]: 'integer',
87628783
[GEN_AI_USAGE_PROMPT_TOKENS]: 'integer',
@@ -9173,6 +9194,7 @@ export type AttributeName =
91739194
| typeof GEN_AI_USAGE_COMPLETION_TOKENS
91749195
| typeof GEN_AI_USAGE_INPUT_TOKENS
91759196
| typeof GEN_AI_USAGE_INPUT_TOKENS_CACHED
9197+
| typeof GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE
91769198
| typeof GEN_AI_USAGE_OUTPUT_TOKENS
91779199
| typeof GEN_AI_USAGE_OUTPUT_TOKENS_REASONING
91789200
| typeof GEN_AI_USAGE_PROMPT_TOKENS
@@ -10873,6 +10895,15 @@ export const ATTRIBUTE_METADATA: Record<AttributeName, AttributeMetadata> = {
1087310895
isInOtel: false,
1087410896
example: 50,
1087510897
},
10898+
[GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE]: {
10899+
brief: 'The number of tokens written to the cache when processing the AI input (prompt).',
10900+
type: 'integer',
10901+
pii: {
10902+
isPii: 'false',
10903+
},
10904+
isInOtel: false,
10905+
example: 100,
10906+
},
1087610907
[GEN_AI_USAGE_OUTPUT_TOKENS]: {
1087710908
brief: 'The number of tokens used for creating the AI output (without reasoning tokens).',
1087810909
type: 'integer',
@@ -13812,6 +13843,7 @@ export type Attributes = {
1381213843
[GEN_AI_USAGE_COMPLETION_TOKENS]?: GEN_AI_USAGE_COMPLETION_TOKENS_TYPE;
1381313844
[GEN_AI_USAGE_INPUT_TOKENS]?: GEN_AI_USAGE_INPUT_TOKENS_TYPE;
1381413845
[GEN_AI_USAGE_INPUT_TOKENS_CACHED]?: GEN_AI_USAGE_INPUT_TOKENS_CACHED_TYPE;
13846+
[GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE]?: GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE_TYPE;
1381513847
[GEN_AI_USAGE_OUTPUT_TOKENS]?: GEN_AI_USAGE_OUTPUT_TOKENS_TYPE;
1381613848
[GEN_AI_USAGE_OUTPUT_TOKENS_REASONING]?: GEN_AI_USAGE_OUTPUT_TOKENS_REASONING_TYPE;
1381713849
[GEN_AI_USAGE_PROMPT_TOKENS]?: GEN_AI_USAGE_PROMPT_TOKENS_TYPE;
model/attributes/gen_ai/gen_ai__usage__input_tokens__cache_write.json

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,10 @@
1+
{
2+
"key": "gen_ai.usage.input_tokens.cache_write",
3+
"brief": "The number of tokens written to the cache when processing the AI input (prompt).",
4+
"type": "integer",
5+
"pii": {
6+
"key": "false"
7+
},
8+
"is_in_otel": false,
9+
"example": 100
10+
}

python/src/sentry_conventions/attributes.py

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1639,6 +1639,18 @@ class ATTRIBUTE_NAMES(metaclass=_AttributeNamesMeta):
16391639
Example: 10
16401640
"""
16411641

1642+
# Path: model/attributes/gen_ai/gen_ai__usage__input_tokens__cache_write.json
1643+
GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE: Literal[
1644+
"gen_ai.usage.input_tokens.cache_write"
1645+
] = "gen_ai.usage.input_tokens.cache_write"
1646+
"""The number of tokens written to the cache when processing the AI input (prompt).
1647+
1648+
Type: int
1649+
Contains PII: false
1650+
Defined in OTEL: No
1651+
Example: 100
1652+
"""
1653+
16421654
# Path: model/attributes/gen_ai/gen_ai__usage__input_tokens__cached.json
16431655
GEN_AI_USAGE_INPUT_TOKENS_CACHED: Literal["gen_ai.usage.input_tokens.cached"] = (
16441656
"gen_ai.usage.input_tokens.cached"
@@ -5765,6 +5777,13 @@ class ATTRIBUTE_NAMES(metaclass=_AttributeNamesMeta):
57655777
example=10,
57665778
aliases=["ai.prompt_tokens.used", "gen_ai.usage.prompt_tokens"],
57675779
),
5780+
"gen_ai.usage.input_tokens.cache_write": AttributeMetadata(
5781+
brief="The number of tokens written to the cache when processing the AI input (prompt).",
5782+
type=AttributeType.INTEGER,
5783+
pii=PiiInfo(isPii=IsPii.FALSE),
5784+
is_in_otel=False,
5785+
example=100,
5786+
),
57685787
"gen_ai.usage.input_tokens.cached": AttributeMetadata(
57695788
brief="The number of cached tokens used to process the AI input (prompt).",
57705789
type=AttributeType.INTEGER,
@@ -8070,6 +8089,7 @@ class ATTRIBUTE_NAMES(metaclass=_AttributeNamesMeta):
80708089
"gen_ai.tool.type": str,
80718090
"gen_ai.usage.completion_tokens": int,
80728091
"gen_ai.usage.input_tokens": int,
8092+
"gen_ai.usage.input_tokens.cache_write": int,
80738093
"gen_ai.usage.input_tokens.cached": int,
80748094
"gen_ai.usage.output_tokens": int,
80758095
"gen_ai.usage.output_tokens.reasoning": int,

0 commit comments

Comments (0)