diff --git a/CHANGELOG.md b/CHANGELOG.md index 238c5181..b4f95319 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ - feat(ai): Add gen_ai.usage.input_tokens.cache_write ([#217](https://github.com/getsentry/sentry-conventions/pull/217)) - feat(attributes): Add sentry.normalized_db_query.hash ([#200](https://github.com/getsentry/sentry-conventions/pull/200)) - feat(attributes): Add sentry.category attribute ([#218](https://github.com/getsentry/sentry-conventions/pull/218)) +- feat(attributes): Add new Gen AI attributes ([#221](https://github.com/getsentry/sentry-conventions/pull/221)) ## 0.3.1 @@ -55,12 +56,11 @@ - feat(sentry): Add sentry.observed_timestamp_nanos ([#137](https://github.com/getsentry/sentry-conventions/pull/137)) - dynamic-sampling: add field conventions for dynamic sampling context ([#128](https://github.com/getsentry/sentry-conventions/pull/128)) - chore(ai): Clean up of `sentry._internal.segment.contains_gen_ai_spans` ([#155](https://github.com/getsentry/sentry-conventions/pull/155)) -- feat(attributes): Add sentry._internal.replay_is_buffering ([#159](https://github.com/getsentry/sentry-conventions/pull/159)) +- feat(attributes): Add sentry.\_internal.replay_is_buffering ([#159](https://github.com/getsentry/sentry-conventions/pull/159)) - feat: Add vercel log drain attributes ([#163](https://github.com/getsentry/sentry-conventions/pull/163)) - feat(attributes) add MCP related attributes ([#164](https://github.com/getsentry/sentry-conventions/pull/164)) - feat(attributes): Add MDC log attributes ([#167](https://github.com/getsentry/sentry-conventions/pull/167)) - ### Fixes - fix(name): Remove duplicate GraphQL op ([#152](https://github.com/getsentry/sentry-conventions/pull/152)) diff --git a/generated/attributes/all.md b/generated/attributes/all.md index c8721a56..9fe5ec19 100644 --- a/generated/attributes/all.md +++ b/generated/attributes/all.md @@ -4,7 +4,7 @@ This page lists all available attributes across all categories. 
-Total attributes: 415 +Total attributes: 421 ## Stable Attributes @@ -81,13 +81,13 @@ Total attributes: 415 | [`gen_ai.cost.output_tokens`](./gen_ai.md#gen_aicostoutput_tokens) | The cost of tokens used for creating the AI output in USD (without reasoning tokens). | | [`gen_ai.cost.total_tokens`](./gen_ai.md#gen_aicosttotal_tokens) | The total cost for the tokens used. | | [`gen_ai.embeddings.input`](./gen_ai.md#gen_aiembeddingsinput) | The input to the embeddings model. | +| [`gen_ai.input.messages`](./gen_ai.md#gen_aiinputmessages) | The messages passed to the model. It has to be a stringified version of an array of objects. The `role` attribute of each object must be `"user"`, `"assistant"`, `"tool"`, or `"system"`. For messages of the role `"tool"`, the `content` can be a string or an arbitrary object with information about the tool call. For other messages the `content` can be either a string or a list of objects in the format `{type: "text", text:"..."}`. | | [`gen_ai.operation.name`](./gen_ai.md#gen_aioperationname) | The name of the operation being performed. | | [`gen_ai.operation.type`](./gen_ai.md#gen_aioperationtype) | The type of AI operation. Must be one of 'agent', 'ai_client', 'tool', 'handoff', 'guardrail'. Makes querying for spans in the UI easier. | +| [`gen_ai.output.messages`](./gen_ai.md#gen_aioutputmessages) | The model's response messages. It has to be a stringified version of an array of message objects, which can include text responses and tool calls. | | [`gen_ai.pipeline.name`](./gen_ai.md#gen_aipipelinename) | Name of the AI pipeline or chain being executed. | -| [`gen_ai.request.available_tools`](./gen_ai.md#gen_airequestavailable_tools) | The available tools for the model. It has to be a stringified version of an array of objects. | | [`gen_ai.request.frequency_penalty`](./gen_ai.md#gen_airequestfrequency_penalty) | Used to reduce repetitiveness of generated tokens. 
The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation. | | [`gen_ai.request.max_tokens`](./gen_ai.md#gen_airequestmax_tokens) | The maximum number of tokens to generate in the response. | -| [`gen_ai.request.messages`](./gen_ai.md#gen_airequestmessages) | The messages passed to the model. It has to be a stringified version of an array of objects. The `role` attribute of each object must be `"user"`, `"assistant"`, `"tool"`, or `"system"`. For messages of the role `"tool"`, the `content` can be a string or an arbitrary object with information about the tool call. For other messages the `content` can be either a string or a list of objects in the format `{type: "text", text:"..."}`. | | [`gen_ai.request.model`](./gen_ai.md#gen_airequestmodel) | The model identifier being used for the request. | | [`gen_ai.request.presence_penalty`](./gen_ai.md#gen_airequestpresence_penalty) | Used to reduce repetitiveness of generated tokens. Similar to frequency_penalty, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies. | | [`gen_ai.request.seed`](./gen_ai.md#gen_airequestseed) | The seed, ideally models given the same seed and same other parameters will produce the exact same output. | @@ -98,11 +98,12 @@ Total attributes: 415 | [`gen_ai.response.id`](./gen_ai.md#gen_airesponseid) | Unique identifier for the completion. | | [`gen_ai.response.model`](./gen_ai.md#gen_airesponsemodel) | The vendor-specific ID of the model used. | | [`gen_ai.response.streaming`](./gen_ai.md#gen_airesponsestreaming) | Whether or not the AI model call's response was streamed back asynchronously | -| [`gen_ai.response.text`](./gen_ai.md#gen_airesponsetext) | The model's response text messages. It has to be a stringified version of an array of response text messages. 
| | [`gen_ai.response.tokens_per_second`](./gen_ai.md#gen_airesponsetokens_per_second) | The total output tokens per seconds throughput | -| [`gen_ai.response.tool_calls`](./gen_ai.md#gen_airesponsetool_calls) | The tool calls in the model's response. It has to be a stringified version of an array of objects. | | [`gen_ai.system`](./gen_ai.md#gen_aisystem) | The provider of the model. | -| [`gen_ai.system.message`](./gen_ai.md#gen_aisystemmessage) | The system instructions passed to the model. | +| [`gen_ai.system_instructions`](./gen_ai.md#gen_aisystem_instructions) | The system instructions passed to the model. | +| [`gen_ai.tool.call.arguments`](./gen_ai.md#gen_aitoolcallarguments) | The arguments of the tool call. It has to be a stringified version of the arguments to the tool. | +| [`gen_ai.tool.call.result`](./gen_ai.md#gen_aitoolcallresult) | The result of the tool call. It has to be a stringified version of the result of the tool. | +| [`gen_ai.tool.definitions`](./gen_ai.md#gen_aitooldefinitions) | The list of source system tool definitions available to the GenAI agent or model. | | [`gen_ai.tool.description`](./gen_ai.md#gen_aitooldescription) | The description of the tool being used. | | [`gen_ai.tool.input`](./gen_ai.md#gen_aitoolinput) | The input of the tool being used. It has to be a stringified version of the input to the tool. | | [`gen_ai.tool.message`](./gen_ai.md#gen_aitoolmessage) | The response from a tool or function call passed to the model. 
| @@ -389,6 +390,11 @@ Total attributes: 415 | [`environment`](./general.md#environment) | [`sentry.environment`](./sentry.md#sentryenvironment) | | [`fs_error`](./general.md#fs_error) | [`error.type`](./error.md#errortype) | | [`gen_ai.prompt`](./gen_ai.md#gen_aiprompt) | No replacement | +| [`gen_ai.request.available_tools`](./gen_ai.md#gen_airequestavailable_tools) | [`gen_ai.tool.definitions`](./gen_ai.md#gen_aitooldefinitions) | +| [`gen_ai.request.messages`](./gen_ai.md#gen_airequestmessages) | [`gen_ai.input.messages`](./gen_ai.md#gen_aiinputmessages) | +| [`gen_ai.response.text`](./gen_ai.md#gen_airesponsetext) | [`gen_ai.output.messages`](./gen_ai.md#gen_aioutputmessages) | +| [`gen_ai.response.tool_calls`](./gen_ai.md#gen_airesponsetool_calls) | [`gen_ai.output.messages`](./gen_ai.md#gen_aioutputmessages) | +| [`gen_ai.system.message`](./gen_ai.md#gen_aisystemmessage) | [`gen_ai.system_instructions`](./gen_ai.md#gen_aisystem_instructions) | | [`gen_ai.usage.completion_tokens`](./gen_ai.md#gen_aiusagecompletion_tokens) | [`gen_ai.usage.output_tokens`](./gen_ai.md#gen_aiusageoutput_tokens) | | [`gen_ai.usage.prompt_tokens`](./gen_ai.md#gen_aiusageprompt_tokens) | [`gen_ai.usage.input_tokens`](./gen_ai.md#gen_aiusageinput_tokens) | | [`http.client_ip`](./http.md#httpclient_ip) | [`client.address`](./client.md#clientaddress) | diff --git a/generated/attributes/gen_ai.md b/generated/attributes/gen_ai.md index 07713dfd..2635540c 100644 --- a/generated/attributes/gen_ai.md +++ b/generated/attributes/gen_ai.md @@ -10,13 +10,13 @@ - [gen_ai.cost.output_tokens](#gen_aicostoutput_tokens) - [gen_ai.cost.total_tokens](#gen_aicosttotal_tokens) - [gen_ai.embeddings.input](#gen_aiembeddingsinput) + - [gen_ai.input.messages](#gen_aiinputmessages) - [gen_ai.operation.name](#gen_aioperationname) - [gen_ai.operation.type](#gen_aioperationtype) + - [gen_ai.output.messages](#gen_aioutputmessages) - [gen_ai.pipeline.name](#gen_aipipelinename) - - 
[gen_ai.request.available_tools](#gen_airequestavailable_tools) - [gen_ai.request.frequency_penalty](#gen_airequestfrequency_penalty) - [gen_ai.request.max_tokens](#gen_airequestmax_tokens) - - [gen_ai.request.messages](#gen_airequestmessages) - [gen_ai.request.model](#gen_airequestmodel) - [gen_ai.request.presence_penalty](#gen_airequestpresence_penalty) - [gen_ai.request.seed](#gen_airequestseed) @@ -27,11 +27,12 @@ - [gen_ai.response.id](#gen_airesponseid) - [gen_ai.response.model](#gen_airesponsemodel) - [gen_ai.response.streaming](#gen_airesponsestreaming) - - [gen_ai.response.text](#gen_airesponsetext) - [gen_ai.response.tokens_per_second](#gen_airesponsetokens_per_second) - - [gen_ai.response.tool_calls](#gen_airesponsetool_calls) - [gen_ai.system](#gen_aisystem) - - [gen_ai.system.message](#gen_aisystemmessage) + - [gen_ai.system_instructions](#gen_aisystem_instructions) + - [gen_ai.tool.call.arguments](#gen_aitoolcallarguments) + - [gen_ai.tool.call.result](#gen_aitoolcallresult) + - [gen_ai.tool.definitions](#gen_aitooldefinitions) - [gen_ai.tool.description](#gen_aitooldescription) - [gen_ai.tool.input](#gen_aitoolinput) - [gen_ai.tool.message](#gen_aitoolmessage) @@ -47,6 +48,11 @@ - [gen_ai.user.message](#gen_aiusermessage) - [Deprecated Attributes](#deprecated-attributes) - [gen_ai.prompt](#gen_aiprompt) + - [gen_ai.request.available_tools](#gen_airequestavailable_tools) + - [gen_ai.request.messages](#gen_airequestmessages) + - [gen_ai.response.text](#gen_airesponsetext) + - [gen_ai.response.tool_calls](#gen_airesponsetool_calls) + - [gen_ai.system.message](#gen_aisystemmessage) - [gen_ai.usage.completion_tokens](#gen_aiusagecompletion_tokens) - [gen_ai.usage.prompt_tokens](#gen_aiusageprompt_tokens) @@ -129,6 +135,17 @@ The input to the embeddings model. | Exists in OpenTelemetry | No | | Example | `What's the weather in Paris?` | +### gen_ai.input.messages + +The messages passed to the model. 
It has to be a stringified version of an array of objects. The `role` attribute of each object must be `"user"`, `"assistant"`, `"tool"`, or `"system"`. For messages of the role `"tool"`, the `content` can be a string or an arbitrary object with information about the tool call. For other messages the `content` can be either a string or a list of objects in the format `{type: "text", text:"..."}`. + +| Property | Value | +| --- | --- | +| Type | `string` | +| Has PII | maybe | +| Exists in OpenTelemetry | Yes | +| Example | `[{"role": "user", "parts": [{"type": "text", "content": "Weather in Paris?"}]}, {"role": "assistant", "parts": [{"type": "tool_call", "id": "call_VSPygqKTWdrhaFErNvMV18Yl", "name": "get_weather", "arguments": {"location": "Paris"}}]}, {"role": "tool", "parts": [{"type": "tool_call_response", "id": "call_VSPygqKTWdrhaFErNvMV18Yl", "result": "rainy, 57°F"}]}]` | + ### gen_ai.operation.name The name of the operation being performed. @@ -151,28 +168,28 @@ The type of AI operation. Must be one of 'agent', 'ai_client', 'tool', 'handoff' | Exists in OpenTelemetry | No | | Example | `tool` | -### gen_ai.pipeline.name +### gen_ai.output.messages -Name of the AI pipeline or chain being executed. +The model's response messages. It has to be a stringified version of an array of message objects, which can include text responses and tool calls. | Property | Value | | --- | --- | | Type | `string` | | Has PII | maybe | -| Exists in OpenTelemetry | No | -| Example | `Autofix Pipeline` | -| Aliases | `ai.pipeline.name` | +| Exists in OpenTelemetry | Yes | +| Example | `[{"role": "assistant", "parts": [{"type": "text", "content": "The weather in Paris is currently rainy with a temperature of 57°F."}], "finish_reason": "stop"}]` | -### gen_ai.request.available_tools +### gen_ai.pipeline.name -The available tools for the model. It has to be a stringified version of an array of objects. +Name of the AI pipeline or chain being executed. 
| Property | Value | | --- | --- | | Type | `string` | | Has PII | maybe | | Exists in OpenTelemetry | No | -| Example | `[{"name": "get_weather", "description": "Get the weather for a given location"}, {"name": "get_news", "description": "Get the news for a given topic"}]` | +| Example | `Autofix Pipeline` | +| Aliases | `ai.pipeline.name` | ### gen_ai.request.frequency_penalty @@ -197,18 +214,6 @@ The maximum number of tokens to generate in the response. | Exists in OpenTelemetry | Yes | | Example | `2048` | -### gen_ai.request.messages - -The messages passed to the model. It has to be a stringified version of an array of objects. The `role` attribute of each object must be `"user"`, `"assistant"`, `"tool"`, or `"system"`. For messages of the role `"tool"`, the `content` can be a string or an arbitrary object with information about the tool call. For other messages the `content` can be either a string or a list of objects in the format `{type: "text", text:"..."}`. - -| Property | Value | -| --- | --- | -| Type | `string` | -| Has PII | maybe | -| Exists in OpenTelemetry | No | -| Example | `[{"role": "system", "content": "Generate a random number."}, {"role": "user", "content": [{"text": "Generate a random number between 0 and 10.", "type": "text"}]}, {"role": "tool", "content": {"toolCallId": "1", "toolName": "Weather", "output": "rainy"}}]` | -| Aliases | `ai.input_messages` | - ### gen_ai.request.model The model identifier being used for the request. @@ -328,61 +333,72 @@ Whether or not the AI model call's response was streamed back asynchronously | Example | `true` | | Aliases | `ai.streaming` | -### gen_ai.response.text +### gen_ai.response.tokens_per_second -The model's response text messages. It has to be a stringified version of an array of response text messages. 
+The total output tokens per seconds throughput + +| Property | Value | +| --- | --- | +| Type | `double` | +| Has PII | false | +| Exists in OpenTelemetry | No | +| Example | `12345.67` | + +### gen_ai.system + +The provider of the model. | Property | Value | | --- | --- | | Type | `string` | | Has PII | maybe | -| Exists in OpenTelemetry | No | -| Example | `["The weather in Paris is rainy and overcast, with temperatures around 57°F", "The weather in London is sunny and warm, with temperatures around 65°F"]` | +| Exists in OpenTelemetry | Yes | +| Example | `openai` | +| Aliases | `ai.model.provider` | -### gen_ai.response.tokens_per_second +### gen_ai.system_instructions -The total output tokens per seconds throughput +The system instructions passed to the model. | Property | Value | | --- | --- | -| Type | `double` | -| Has PII | false | -| Exists in OpenTelemetry | No | -| Example | `12345.67` | +| Type | `string` | +| Has PII | maybe | +| Exists in OpenTelemetry | Yes | +| Example | `You are a helpful assistant` | -### gen_ai.response.tool_calls +### gen_ai.tool.call.arguments -The tool calls in the model's response. It has to be a stringified version of an array of objects. +The arguments of the tool call. It has to be a stringified version of the arguments to the tool. | Property | Value | | --- | --- | | Type | `string` | | Has PII | maybe | -| Exists in OpenTelemetry | No | -| Example | `[{"name": "get_weather", "arguments": {"location": "Paris"}}]` | +| Exists in OpenTelemetry | Yes | +| Example | `{"location": "Paris"}` | -### gen_ai.system +### gen_ai.tool.call.result -The provider of the model. +The result of the tool call. It has to be a stringified version of the result of the tool. 
| Property | Value | | --- | --- | | Type | `string` | | Has PII | maybe | | Exists in OpenTelemetry | Yes | -| Example | `openai` | -| Aliases | `ai.model.provider` | +| Example | `rainy, 57°F` | -### gen_ai.system.message +### gen_ai.tool.definitions -The system instructions passed to the model. +The list of source system tool definitions available to the GenAI agent or model. | Property | Value | | --- | --- | | Type | `string` | -| Has PII | true | -| Exists in OpenTelemetry | No | -| Example | `You are a helpful assistant` | +| Has PII | maybe | +| Exists in OpenTelemetry | Yes | +| Example | `[{"type": "function", "name": "get_current_weather", "description": "Get the current weather in a given location", "parameters": {"type": "object", "properties": {"location": {"type": "string", "description": "The city and state, e.g. San Francisco, CA"}, "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}}, "required": ["location", "unit"]}}]` | ### gen_ai.tool.description @@ -548,6 +564,67 @@ The input messages sent to the model | Deprecated | Yes, no replacement at this time | | Deprecation Reason | Deprecated from OTEL, use gen_ai.input.messages with the new format instead. | +### gen_ai.request.available_tools + +The available tools for the model. It has to be a stringified version of an array of objects. + +| Property | Value | +| --- | --- | +| Type | `string` | +| Has PII | maybe | +| Exists in OpenTelemetry | No | +| Example | `[{"name": "get_weather", "description": "Get the weather for a given location"}, {"name": "get_news", "description": "Get the news for a given topic"}]` | +| Deprecated | Yes, use `gen_ai.tool.definitions` instead | + +### gen_ai.request.messages + +The messages passed to the model. It has to be a stringified version of an array of objects. The `role` attribute of each object must be `"user"`, `"assistant"`, `"tool"`, or `"system"`. 
For messages of the role `"tool"`, the `content` can be a string or an arbitrary object with information about the tool call. For other messages the `content` can be either a string or a list of objects in the format `{type: "text", text:"..."}`. + +| Property | Value | +| --- | --- | +| Type | `string` | +| Has PII | maybe | +| Exists in OpenTelemetry | No | +| Example | `[{"role": "system", "content": "Generate a random number."}, {"role": "user", "content": [{"text": "Generate a random number between 0 and 10.", "type": "text"}]}, {"role": "tool", "content": {"toolCallId": "1", "toolName": "Weather", "output": "rainy"}}]` | +| Deprecated | Yes, use `gen_ai.input.messages` instead | +| Aliases | `ai.input_messages` | + +### gen_ai.response.text + +The model's response text messages. It has to be a stringified version of an array of response text messages. + +| Property | Value | +| --- | --- | +| Type | `string` | +| Has PII | maybe | +| Exists in OpenTelemetry | No | +| Example | `["The weather in Paris is rainy and overcast, with temperatures around 57°F", "The weather in London is sunny and warm, with temperatures around 65°F"]` | +| Deprecated | Yes, use `gen_ai.output.messages` instead | + +### gen_ai.response.tool_calls + +The tool calls in the model's response. It has to be a stringified version of an array of objects. + +| Property | Value | +| --- | --- | +| Type | `string` | +| Has PII | maybe | +| Exists in OpenTelemetry | No | +| Example | `[{"name": "get_weather", "arguments": {"location": "Paris"}}]` | +| Deprecated | Yes, use `gen_ai.output.messages` instead | + +### gen_ai.system.message + +The system instructions passed to the model. 
+ +| Property | Value | +| --- | --- | +| Type | `string` | +| Has PII | true | +| Exists in OpenTelemetry | No | +| Example | `You are a helpful assistant` | +| Deprecated | Yes, use `gen_ai.system_instructions` instead | + ### gen_ai.usage.completion_tokens The number of tokens used in the GenAI response (completion). diff --git a/javascript/sentry-conventions/src/attributes.ts b/javascript/sentry-conventions/src/attributes.ts index d3ccaf41..26232b2b 100644 --- a/javascript/sentry-conventions/src/attributes.ts +++ b/javascript/sentry-conventions/src/attributes.ts @@ -2145,6 +2145,26 @@ export const GEN_AI_EMBEDDINGS_INPUT = 'gen_ai.embeddings.input'; */ export type GEN_AI_EMBEDDINGS_INPUT_TYPE = string; +// Path: model/attributes/gen_ai/gen_ai__input__messages.json + +/** + * The messages passed to the model. It has to be a stringified version of an array of objects. The `role` attribute of each object must be `"user"`, `"assistant"`, `"tool"`, or `"system"`. For messages of the role `"tool"`, the `content` can be a string or an arbitrary object with information about the tool call. For other messages the `content` can be either a string or a list of objects in the format `{type: "text", text:"..."}`. 
`gen_ai.input.messages` + * + * Attribute Value Type: `string` {@link GEN_AI_INPUT_MESSAGES_TYPE} + * + * Contains PII: maybe + * + * Attribute defined in OTEL: Yes + * + * @example "[{\"role\": \"user\", \"parts\": [{\"type\": \"text\", \"content\": \"Weather in Paris?\"}]}, {\"role\": \"assistant\", \"parts\": [{\"type\": \"tool_call\", \"id\": \"call_VSPygqKTWdrhaFErNvMV18Yl\", \"name\": \"get_weather\", \"arguments\": {\"location\": \"Paris\"}}]}, {\"role\": \"tool\", \"parts\": [{\"type\": \"tool_call_response\", \"id\": \"call_VSPygqKTWdrhaFErNvMV18Yl\", \"result\": \"rainy, 57°F\"}]}]" + */ +export const GEN_AI_INPUT_MESSAGES = 'gen_ai.input.messages'; + +/** + * Type for {@link GEN_AI_INPUT_MESSAGES} gen_ai.input.messages + */ +export type GEN_AI_INPUT_MESSAGES_TYPE = string; + // Path: model/attributes/gen_ai/gen_ai__operation__name.json /** @@ -2185,6 +2205,26 @@ export const GEN_AI_OPERATION_TYPE = 'gen_ai.operation.type'; */ export type GEN_AI_OPERATION_TYPE_TYPE = string; +// Path: model/attributes/gen_ai/gen_ai__output__messages.json + +/** + * The model's response messages. It has to be a stringified version of an array of message objects, which can include text responses and tool calls. 
`gen_ai.output.messages` + * + * Attribute Value Type: `string` {@link GEN_AI_OUTPUT_MESSAGES_TYPE} + * + * Contains PII: maybe + * + * Attribute defined in OTEL: Yes + * + * @example "[{\"role\": \"assistant\", \"parts\": [{\"type\": \"text\", \"content\": \"The weather in Paris is currently rainy with a temperature of 57°F.\"}], \"finish_reason\": \"stop\"}]" + */ +export const GEN_AI_OUTPUT_MESSAGES = 'gen_ai.output.messages'; + +/** + * Type for {@link GEN_AI_OUTPUT_MESSAGES} gen_ai.output.messages + */ +export type GEN_AI_OUTPUT_MESSAGES_TYPE = string; + // Path: model/attributes/gen_ai/gen_ai__pipeline__name.json /** @@ -2239,6 +2279,7 @@ export type GEN_AI_PROMPT_TYPE = string; * * Attribute defined in OTEL: No * + * @deprecated Use {@link GEN_AI_TOOL_DEFINITIONS} (gen_ai.tool.definitions) instead * @example "[{\"name\": \"get_weather\", \"description\": \"Get the weather for a given location\"}, {\"name\": \"get_news\", \"description\": \"Get the news for a given topic\"}]" */ export const GEN_AI_REQUEST_AVAILABLE_TOOLS = 'gen_ai.request.available_tools'; @@ -2303,6 +2344,7 @@ export type GEN_AI_REQUEST_MAX_TOKENS_TYPE = number; * * Aliases: {@link AI_INPUT_MESSAGES} `ai.input_messages` * + * @deprecated Use {@link GEN_AI_INPUT_MESSAGES} (gen_ai.input.messages) instead * @example "[{\"role\": \"system\", \"content\": \"Generate a random number.\"}, {\"role\": \"user\", \"content\": [{\"text\": \"Generate a random number between 0 and 10.\", \"type\": \"text\"}]}, {\"role\": \"tool\", \"content\": {\"toolCallId\": \"1\", \"toolName\": \"Weather\", \"output\": \"rainy\"}}]" */ export const GEN_AI_REQUEST_MESSAGES = 'gen_ai.request.messages'; @@ -2541,6 +2583,7 @@ export type GEN_AI_RESPONSE_STREAMING_TYPE = boolean; * * Attribute defined in OTEL: No * + * @deprecated Use {@link GEN_AI_OUTPUT_MESSAGES} (gen_ai.output.messages) instead * @example "[\"The weather in Paris is rainy and overcast, with temperatures around 57°F\", \"The weather in London is sunny 
and warm, with temperatures around 65°F\"]" */ export const GEN_AI_RESPONSE_TEXT = 'gen_ai.response.text'; @@ -2581,6 +2624,7 @@ export type GEN_AI_RESPONSE_TOKENS_PER_SECOND_TYPE = number; * * Attribute defined in OTEL: No * + * @deprecated Use {@link GEN_AI_OUTPUT_MESSAGES} (gen_ai.output.messages) instead * @example "[{\"name\": \"get_weather\", \"arguments\": {\"location\": \"Paris\"}}]" */ export const GEN_AI_RESPONSE_TOOL_CALLS = 'gen_ai.response.tool_calls'; @@ -2612,6 +2656,26 @@ export const GEN_AI_SYSTEM = 'gen_ai.system'; */ export type GEN_AI_SYSTEM_TYPE = string; +// Path: model/attributes/gen_ai/gen_ai__system_instructions.json + +/** + * The system instructions passed to the model. `gen_ai.system_instructions` + * + * Attribute Value Type: `string` {@link GEN_AI_SYSTEM_INSTRUCTIONS_TYPE} + * + * Contains PII: maybe + * + * Attribute defined in OTEL: Yes + * + * @example "You are a helpful assistant" + */ +export const GEN_AI_SYSTEM_INSTRUCTIONS = 'gen_ai.system_instructions'; + +/** + * Type for {@link GEN_AI_SYSTEM_INSTRUCTIONS} gen_ai.system_instructions + */ +export type GEN_AI_SYSTEM_INSTRUCTIONS_TYPE = string; + // Path: model/attributes/gen_ai/gen_ai__system__message.json /** @@ -2623,6 +2687,7 @@ export type GEN_AI_SYSTEM_TYPE = string; * * Attribute defined in OTEL: No * + * @deprecated Use {@link GEN_AI_SYSTEM_INSTRUCTIONS} (gen_ai.system_instructions) instead * @example "You are a helpful assistant" */ export const GEN_AI_SYSTEM_MESSAGE = 'gen_ai.system.message'; @@ -2632,6 +2697,66 @@ export const GEN_AI_SYSTEM_MESSAGE = 'gen_ai.system.message'; */ export type GEN_AI_SYSTEM_MESSAGE_TYPE = string; +// Path: model/attributes/gen_ai/gen_ai__tool__call__arguments.json + +/** + * The arguments of the tool call. It has to be a stringified version of the arguments to the tool. 
`gen_ai.tool.call.arguments` + * + * Attribute Value Type: `string` {@link GEN_AI_TOOL_CALL_ARGUMENTS_TYPE} + * + * Contains PII: maybe + * + * Attribute defined in OTEL: Yes + * + * @example "{\"location\": \"Paris\"}" + */ +export const GEN_AI_TOOL_CALL_ARGUMENTS = 'gen_ai.tool.call.arguments'; + +/** + * Type for {@link GEN_AI_TOOL_CALL_ARGUMENTS} gen_ai.tool.call.arguments + */ +export type GEN_AI_TOOL_CALL_ARGUMENTS_TYPE = string; + +// Path: model/attributes/gen_ai/gen_ai__tool__call__result.json + +/** + * The result of the tool call. It has to be a stringified version of the result of the tool. `gen_ai.tool.call.result` + * + * Attribute Value Type: `string` {@link GEN_AI_TOOL_CALL_RESULT_TYPE} + * + * Contains PII: maybe + * + * Attribute defined in OTEL: Yes + * + * @example "rainy, 57°F" + */ +export const GEN_AI_TOOL_CALL_RESULT = 'gen_ai.tool.call.result'; + +/** + * Type for {@link GEN_AI_TOOL_CALL_RESULT} gen_ai.tool.call.result + */ +export type GEN_AI_TOOL_CALL_RESULT_TYPE = string; + +// Path: model/attributes/gen_ai/gen_ai__tool__definitions.json + +/** + * The list of source system tool definitions available to the GenAI agent or model. `gen_ai.tool.definitions` + * + * Attribute Value Type: `string` {@link GEN_AI_TOOL_DEFINITIONS_TYPE} + * + * Contains PII: maybe + * + * Attribute defined in OTEL: Yes + * + * @example "[{\"type\": \"function\", \"name\": \"get_current_weather\", \"description\": \"Get the current weather in a given location\", \"parameters\": {\"type\": \"object\", \"properties\": {\"location\": {\"type\": \"string\", \"description\": \"The city and state, e.g. 
San Francisco, CA\"}, \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]}}, \"required\": [\"location\", \"unit\"]}}]" + */ +export const GEN_AI_TOOL_DEFINITIONS = 'gen_ai.tool.definitions'; + +/** + * Type for {@link GEN_AI_TOOL_DEFINITIONS} gen_ai.tool.definitions + */ +export type GEN_AI_TOOL_DEFINITIONS_TYPE = string; + // Path: model/attributes/gen_ai/gen_ai__tool__description.json /** @@ -8765,8 +8890,10 @@ export const ATTRIBUTE_TYPE: Record = { [GEN_AI_COST_OUTPUT_TOKENS]: 'double', [GEN_AI_COST_TOTAL_TOKENS]: 'double', [GEN_AI_EMBEDDINGS_INPUT]: 'string', + [GEN_AI_INPUT_MESSAGES]: 'string', [GEN_AI_OPERATION_NAME]: 'string', [GEN_AI_OPERATION_TYPE]: 'string', + [GEN_AI_OUTPUT_MESSAGES]: 'string', [GEN_AI_PIPELINE_NAME]: 'string', [GEN_AI_PROMPT]: 'string', [GEN_AI_REQUEST_AVAILABLE_TOOLS]: 'string', @@ -8787,7 +8914,11 @@ export const ATTRIBUTE_TYPE: Record = { [GEN_AI_RESPONSE_TOKENS_PER_SECOND]: 'double', [GEN_AI_RESPONSE_TOOL_CALLS]: 'string', [GEN_AI_SYSTEM]: 'string', + [GEN_AI_SYSTEM_INSTRUCTIONS]: 'string', [GEN_AI_SYSTEM_MESSAGE]: 'string', + [GEN_AI_TOOL_CALL_ARGUMENTS]: 'string', + [GEN_AI_TOOL_CALL_RESULT]: 'string', + [GEN_AI_TOOL_DEFINITIONS]: 'string', [GEN_AI_TOOL_DESCRIPTION]: 'string', [GEN_AI_TOOL_INPUT]: 'string', [GEN_AI_TOOL_MESSAGE]: 'string', @@ -9183,8 +9314,10 @@ export type AttributeName = | typeof GEN_AI_COST_OUTPUT_TOKENS | typeof GEN_AI_COST_TOTAL_TOKENS | typeof GEN_AI_EMBEDDINGS_INPUT + | typeof GEN_AI_INPUT_MESSAGES | typeof GEN_AI_OPERATION_NAME | typeof GEN_AI_OPERATION_TYPE + | typeof GEN_AI_OUTPUT_MESSAGES | typeof GEN_AI_PIPELINE_NAME | typeof GEN_AI_PROMPT | typeof GEN_AI_REQUEST_AVAILABLE_TOOLS @@ -9205,7 +9338,11 @@ export type AttributeName = | typeof GEN_AI_RESPONSE_TOKENS_PER_SECOND | typeof GEN_AI_RESPONSE_TOOL_CALLS | typeof GEN_AI_SYSTEM + | typeof GEN_AI_SYSTEM_INSTRUCTIONS | typeof GEN_AI_SYSTEM_MESSAGE + | typeof GEN_AI_TOOL_CALL_ARGUMENTS + | typeof GEN_AI_TOOL_CALL_RESULT + | typeof 
GEN_AI_TOOL_DEFINITIONS | typeof GEN_AI_TOOL_DESCRIPTION | typeof GEN_AI_TOOL_INPUT | typeof GEN_AI_TOOL_MESSAGE @@ -10596,6 +10733,17 @@ export const ATTRIBUTE_METADATA: Record = { isInOtel: false, example: "What's the weather in Paris?", }, + [GEN_AI_INPUT_MESSAGES]: { + brief: + 'The messages passed to the model. It has to be a stringified version of an array of objects. The `role` attribute of each object must be `"user"`, `"assistant"`, `"tool"`, or `"system"`. For messages of the role `"tool"`, the `content` can be a string or an arbitrary object with information about the tool call. For other messages the `content` can be either a string or a list of objects in the format `{type: "text", text:"..."}`.', + type: 'string', + pii: { + isPii: 'maybe', + }, + isInOtel: true, + example: + '[{"role": "user", "parts": [{"type": "text", "content": "Weather in Paris?"}]}, {"role": "assistant", "parts": [{"type": "tool_call", "id": "call_VSPygqKTWdrhaFErNvMV18Yl", "name": "get_weather", "arguments": {"location": "Paris"}}]}, {"role": "tool", "parts": [{"type": "tool_call_response", "id": "call_VSPygqKTWdrhaFErNvMV18Yl", "result": "rainy, 57°F"}]}]', + }, [GEN_AI_OPERATION_NAME]: { brief: 'The name of the operation being performed.', type: 'string', @@ -10615,6 +10763,17 @@ export const ATTRIBUTE_METADATA: Record = { isInOtel: false, example: 'tool', }, + [GEN_AI_OUTPUT_MESSAGES]: { + brief: + "The model's response messages. 
It has to be a stringified version of an array of message objects, which can include text responses and tool calls.", + type: 'string', + pii: { + isPii: 'maybe', + }, + isInOtel: true, + example: + '[{"role": "assistant", "parts": [{"type": "text", "content": "The weather in Paris is currently rainy with a temperature of 57°F."}], "finish_reason": "stop"}]', + }, [GEN_AI_PIPELINE_NAME]: { brief: 'Name of the AI pipeline or chain being executed.', type: 'string', @@ -10646,6 +10805,9 @@ export const ATTRIBUTE_METADATA: Record = { isInOtel: false, example: '[{"name": "get_weather", "description": "Get the weather for a given location"}, {"name": "get_news", "description": "Get the news for a given topic"}]', + deprecation: { + replacement: 'gen_ai.tool.definitions', + }, }, [GEN_AI_REQUEST_FREQUENCY_PENALTY]: { brief: @@ -10677,6 +10839,9 @@ export const ATTRIBUTE_METADATA: Record = { isInOtel: false, example: '[{"role": "system", "content": "Generate a random number."}, {"role": "user", "content": [{"text": "Generate a random number between 0 and 10.", "type": "text"}]}, {"role": "tool", "content": {"toolCallId": "1", "toolName": "Weather", "output": "rainy"}}]', + deprecation: { + replacement: 'gen_ai.input.messages', + }, aliases: [AI_INPUT_MESSAGES], }, [GEN_AI_REQUEST_MODEL]: { @@ -10792,6 +10957,9 @@ export const ATTRIBUTE_METADATA: Record = { isInOtel: false, example: '["The weather in Paris is rainy and overcast, with temperatures around 57°F", "The weather in London is sunny and warm, with temperatures around 65°F"]', + deprecation: { + replacement: 'gen_ai.output.messages', + }, }, [GEN_AI_RESPONSE_TOKENS_PER_SECOND]: { brief: 'The total output tokens per seconds throughput', @@ -10810,6 +10978,9 @@ export const ATTRIBUTE_METADATA: Record = { }, isInOtel: false, example: '[{"name": "get_weather", "arguments": {"location": "Paris"}}]', + deprecation: { + replacement: 'gen_ai.output.messages', + }, }, [GEN_AI_SYSTEM]: { brief: 'The provider of the model.', 
@@ -10821,6 +10992,15 @@ export const ATTRIBUTE_METADATA: Record = { example: 'openai', aliases: [AI_MODEL_PROVIDER], }, + [GEN_AI_SYSTEM_INSTRUCTIONS]: { + brief: 'The system instructions passed to the model.', + type: 'string', + pii: { + isPii: 'maybe', + }, + isInOtel: true, + example: 'You are a helpful assistant', + }, [GEN_AI_SYSTEM_MESSAGE]: { brief: 'The system instructions passed to the model.', type: 'string', @@ -10829,6 +11009,37 @@ export const ATTRIBUTE_METADATA: Record = { }, isInOtel: false, example: 'You are a helpful assistant', + deprecation: { + replacement: 'gen_ai.system_instructions', + }, + }, + [GEN_AI_TOOL_CALL_ARGUMENTS]: { + brief: 'The arguments of the tool call. It has to be a stringified version of the arguments to the tool.', + type: 'string', + pii: { + isPii: 'maybe', + }, + isInOtel: true, + example: '{"location": "Paris"}', + }, + [GEN_AI_TOOL_CALL_RESULT]: { + brief: 'The result of the tool call. It has to be a stringified version of the result of the tool.', + type: 'string', + pii: { + isPii: 'maybe', + }, + isInOtel: true, + example: 'rainy, 57°F', + }, + [GEN_AI_TOOL_DEFINITIONS]: { + brief: 'The list of source system tool definitions available to the GenAI agent or model.', + type: 'string', + pii: { + isPii: 'maybe', + }, + isInOtel: true, + example: + '[{"type": "function", "name": "get_current_weather", "description": "Get the current weather in a given location", "parameters": {"type": "object", "properties": {"location": {"type": "string", "description": "The city and state, e.g. 
San Francisco, CA"}, "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}}, "required": ["location", "unit"]}}]', }, [GEN_AI_TOOL_DESCRIPTION]: { brief: 'The description of the tool being used.', @@ -13843,8 +14054,10 @@ export type Attributes = { [GEN_AI_COST_OUTPUT_TOKENS]?: GEN_AI_COST_OUTPUT_TOKENS_TYPE; [GEN_AI_COST_TOTAL_TOKENS]?: GEN_AI_COST_TOTAL_TOKENS_TYPE; [GEN_AI_EMBEDDINGS_INPUT]?: GEN_AI_EMBEDDINGS_INPUT_TYPE; + [GEN_AI_INPUT_MESSAGES]?: GEN_AI_INPUT_MESSAGES_TYPE; [GEN_AI_OPERATION_NAME]?: GEN_AI_OPERATION_NAME_TYPE; [GEN_AI_OPERATION_TYPE]?: GEN_AI_OPERATION_TYPE_TYPE; + [GEN_AI_OUTPUT_MESSAGES]?: GEN_AI_OUTPUT_MESSAGES_TYPE; [GEN_AI_PIPELINE_NAME]?: GEN_AI_PIPELINE_NAME_TYPE; [GEN_AI_PROMPT]?: GEN_AI_PROMPT_TYPE; [GEN_AI_REQUEST_AVAILABLE_TOOLS]?: GEN_AI_REQUEST_AVAILABLE_TOOLS_TYPE; @@ -13865,7 +14078,11 @@ export type Attributes = { [GEN_AI_RESPONSE_TOKENS_PER_SECOND]?: GEN_AI_RESPONSE_TOKENS_PER_SECOND_TYPE; [GEN_AI_RESPONSE_TOOL_CALLS]?: GEN_AI_RESPONSE_TOOL_CALLS_TYPE; [GEN_AI_SYSTEM]?: GEN_AI_SYSTEM_TYPE; + [GEN_AI_SYSTEM_INSTRUCTIONS]?: GEN_AI_SYSTEM_INSTRUCTIONS_TYPE; [GEN_AI_SYSTEM_MESSAGE]?: GEN_AI_SYSTEM_MESSAGE_TYPE; + [GEN_AI_TOOL_CALL_ARGUMENTS]?: GEN_AI_TOOL_CALL_ARGUMENTS_TYPE; + [GEN_AI_TOOL_CALL_RESULT]?: GEN_AI_TOOL_CALL_RESULT_TYPE; + [GEN_AI_TOOL_DEFINITIONS]?: GEN_AI_TOOL_DEFINITIONS_TYPE; [GEN_AI_TOOL_DESCRIPTION]?: GEN_AI_TOOL_DESCRIPTION_TYPE; [GEN_AI_TOOL_INPUT]?: GEN_AI_TOOL_INPUT_TYPE; [GEN_AI_TOOL_MESSAGE]?: GEN_AI_TOOL_MESSAGE_TYPE; diff --git a/model/attributes/gen_ai/gen_ai__input__messages.json b/model/attributes/gen_ai/gen_ai__input__messages.json new file mode 100644 index 00000000..15b0584c --- /dev/null +++ b/model/attributes/gen_ai/gen_ai__input__messages.json @@ -0,0 +1,10 @@ +{ + "key": "gen_ai.input.messages", + "brief": "The messages passed to the model. It has to be a stringified version of an array of objects. 
The `role` attribute of each object must be `\"user\"`, `\"assistant\"`, `\"tool\"`, or `\"system\"`. For messages of the role `\"tool\"`, the `content` can be a string or an arbitrary object with information about the tool call. For other messages the `content` can be either a string or a list of objects in the format `{type: \"text\", text:\"...\"}`.", + "type": "string", + "pii": { + "key": "maybe" + }, + "is_in_otel": true, + "example": "[{\"role\": \"user\", \"parts\": [{\"type\": \"text\", \"content\": \"Weather in Paris?\"}]}, {\"role\": \"assistant\", \"parts\": [{\"type\": \"tool_call\", \"id\": \"call_VSPygqKTWdrhaFErNvMV18Yl\", \"name\": \"get_weather\", \"arguments\": {\"location\": \"Paris\"}}]}, {\"role\": \"tool\", \"parts\": [{\"type\": \"tool_call_response\", \"id\": \"call_VSPygqKTWdrhaFErNvMV18Yl\", \"result\": \"rainy, 57°F\"}]}]" +} diff --git a/model/attributes/gen_ai/gen_ai__output__messages.json b/model/attributes/gen_ai/gen_ai__output__messages.json new file mode 100644 index 00000000..d6a12379 --- /dev/null +++ b/model/attributes/gen_ai/gen_ai__output__messages.json @@ -0,0 +1,10 @@ +{ + "key": "gen_ai.output.messages", + "brief": "The model's response messages. 
It has to be a stringified version of an array of message objects, which can include text responses and tool calls.", + "type": "string", + "pii": { + "key": "maybe" + }, + "is_in_otel": true, + "example": "[{\"role\": \"assistant\", \"parts\": [{\"type\": \"text\", \"content\": \"The weather in Paris is currently rainy with a temperature of 57°F.\"}], \"finish_reason\": \"stop\"}]" +} diff --git a/model/attributes/gen_ai/gen_ai__request__available_tools.json b/model/attributes/gen_ai/gen_ai__request__available_tools.json index 0a288af0..69066810 100644 --- a/model/attributes/gen_ai/gen_ai__request__available_tools.json +++ b/model/attributes/gen_ai/gen_ai__request__available_tools.json @@ -7,5 +7,9 @@ }, "is_in_otel": false, "example": "[{\"name\": \"get_weather\", \"description\": \"Get the weather for a given location\"}, {\"name\": \"get_news\", \"description\": \"Get the news for a given topic\"}]", - "alias": [] + "alias": [], + "deprecation": { + "_status": null, + "replacement": "gen_ai.tool.definitions" + } } diff --git a/model/attributes/gen_ai/gen_ai__request__messages.json b/model/attributes/gen_ai/gen_ai__request__messages.json index 5875b906..8e443473 100644 --- a/model/attributes/gen_ai/gen_ai__request__messages.json +++ b/model/attributes/gen_ai/gen_ai__request__messages.json @@ -7,5 +7,9 @@ }, "is_in_otel": false, "example": "[{\"role\": \"system\", \"content\": \"Generate a random number.\"}, {\"role\": \"user\", \"content\": [{\"text\": \"Generate a random number between 0 and 10.\", \"type\": \"text\"}]}, {\"role\": \"tool\", \"content\": {\"toolCallId\": \"1\", \"toolName\": \"Weather\", \"output\": \"rainy\"}}]", - "alias": ["ai.input_messages"] + "alias": ["ai.input_messages"], + "deprecation": { + "_status": null, + "replacement": "gen_ai.input.messages" + } } diff --git a/model/attributes/gen_ai/gen_ai__response__text.json b/model/attributes/gen_ai/gen_ai__response__text.json index d4db9fde..239012a1 100644 --- 
a/model/attributes/gen_ai/gen_ai__response__text.json +++ b/model/attributes/gen_ai/gen_ai__response__text.json @@ -7,5 +7,9 @@ }, "is_in_otel": false, "example": "[\"The weather in Paris is rainy and overcast, with temperatures around 57°F\", \"The weather in London is sunny and warm, with temperatures around 65°F\"]", - "alias": [] + "alias": [], + "deprecation": { + "_status": null, + "replacement": "gen_ai.output.messages" + } } diff --git a/model/attributes/gen_ai/gen_ai__response__tool_calls.json b/model/attributes/gen_ai/gen_ai__response__tool_calls.json index eddd26b8..34cf8136 100644 --- a/model/attributes/gen_ai/gen_ai__response__tool_calls.json +++ b/model/attributes/gen_ai/gen_ai__response__tool_calls.json @@ -7,5 +7,9 @@ }, "is_in_otel": false, "example": "[{\"name\": \"get_weather\", \"arguments\": {\"location\": \"Paris\"}}]", - "alias": [] + "alias": [], + "deprecation": { + "_status": null, + "replacement": "gen_ai.output.messages" + } } diff --git a/model/attributes/gen_ai/gen_ai__system__message.json b/model/attributes/gen_ai/gen_ai__system__message.json index 3e89f602..aee9357f 100644 --- a/model/attributes/gen_ai/gen_ai__system__message.json +++ b/model/attributes/gen_ai/gen_ai__system__message.json @@ -6,5 +6,9 @@ "key": "true" }, "is_in_otel": false, - "example": "You are a helpful assistant" + "example": "You are a helpful assistant", + "deprecation": { + "_status": null, + "replacement": "gen_ai.system_instructions" + } } diff --git a/model/attributes/gen_ai/gen_ai__system_instructions.json b/model/attributes/gen_ai/gen_ai__system_instructions.json new file mode 100644 index 00000000..c2d98d09 --- /dev/null +++ b/model/attributes/gen_ai/gen_ai__system_instructions.json @@ -0,0 +1,10 @@ +{ + "key": "gen_ai.system_instructions", + "brief": "The system instructions passed to the model.", + "type": "string", + "pii": { + "key": "maybe" + }, + "is_in_otel": true, + "example": "You are a helpful assistant" +} diff --git 
a/model/attributes/gen_ai/gen_ai__tool__call__arguments.json b/model/attributes/gen_ai/gen_ai__tool__call__arguments.json new file mode 100644 index 00000000..1d9892cf --- /dev/null +++ b/model/attributes/gen_ai/gen_ai__tool__call__arguments.json @@ -0,0 +1,10 @@ +{ + "key": "gen_ai.tool.call.arguments", + "brief": "The arguments of the tool call. It has to be a stringified version of the arguments to the tool.", + "type": "string", + "pii": { + "key": "maybe" + }, + "is_in_otel": true, + "example": "{\"location\": \"Paris\"}" +} diff --git a/model/attributes/gen_ai/gen_ai__tool__call__result.json b/model/attributes/gen_ai/gen_ai__tool__call__result.json new file mode 100644 index 00000000..724f62b0 --- /dev/null +++ b/model/attributes/gen_ai/gen_ai__tool__call__result.json @@ -0,0 +1,10 @@ +{ + "key": "gen_ai.tool.call.result", + "brief": "The result of the tool call. It has to be a stringified version of the result of the tool.", + "type": "string", + "pii": { + "key": "maybe" + }, + "is_in_otel": true, + "example": "rainy, 57°F" +} diff --git a/model/attributes/gen_ai/gen_ai__tool__definitions.json b/model/attributes/gen_ai/gen_ai__tool__definitions.json new file mode 100644 index 00000000..7ab0f733 --- /dev/null +++ b/model/attributes/gen_ai/gen_ai__tool__definitions.json @@ -0,0 +1,10 @@ +{ + "key": "gen_ai.tool.definitions", + "brief": "The list of source system tool definitions available to the GenAI agent or model.", + "type": "string", + "pii": { + "key": "maybe" + }, + "is_in_otel": true, + "example": "[{\"type\": \"function\", \"name\": \"get_current_weather\", \"description\": \"Get the current weather in a given location\", \"parameters\": {\"type\": \"object\", \"properties\": {\"location\": {\"type\": \"string\", \"description\": \"The city and state, e.g. 
San Francisco, CA\"}, \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]}}, \"required\": [\"location\", \"unit\"]}}]" +} diff --git a/python/src/sentry_conventions/attributes.py b/python/src/sentry_conventions/attributes.py index 7bd158e8..a541d592 100644 --- a/python/src/sentry_conventions/attributes.py +++ b/python/src/sentry_conventions/attributes.py @@ -117,6 +117,11 @@ class _AttributeNamesMeta(type): "ENVIRONMENT", "FS_ERROR", "GEN_AI_PROMPT", + "GEN_AI_REQUEST_AVAILABLE_TOOLS", + "GEN_AI_REQUEST_MESSAGES", + "GEN_AI_RESPONSE_TEXT", + "GEN_AI_RESPONSE_TOOL_CALLS", + "GEN_AI_SYSTEM_MESSAGE", "GEN_AI_USAGE_COMPLETION_TOKENS", "GEN_AI_USAGE_PROMPT_TOKENS", "HTTP_CLIENT_IP", @@ -1285,6 +1290,16 @@ class ATTRIBUTE_NAMES(metaclass=_AttributeNamesMeta): Example: "What's the weather in Paris?" """ + # Path: model/attributes/gen_ai/gen_ai__input__messages.json + GEN_AI_INPUT_MESSAGES: Literal["gen_ai.input.messages"] = "gen_ai.input.messages" + """The messages passed to the model. It has to be a stringified version of an array of objects. The `role` attribute of each object must be `"user"`, `"assistant"`, `"tool"`, or `"system"`. For messages of the role `"tool"`, the `content` can be a string or an arbitrary object with information about the tool call. For other messages the `content` can be either a string or a list of objects in the format `{type: "text", text:"..."}`. 
+ + Type: str + Contains PII: maybe + Defined in OTEL: Yes + Example: "[{\"role\": \"user\", \"parts\": [{\"type\": \"text\", \"content\": \"Weather in Paris?\"}]}, {\"role\": \"assistant\", \"parts\": [{\"type\": \"tool_call\", \"id\": \"call_VSPygqKTWdrhaFErNvMV18Yl\", \"name\": \"get_weather\", \"arguments\": {\"location\": \"Paris\"}}]}, {\"role\": \"tool\", \"parts\": [{\"type\": \"tool_call_response\", \"id\": \"call_VSPygqKTWdrhaFErNvMV18Yl\", \"result\": \"rainy, 57°F\"}]}]" + """ + # Path: model/attributes/gen_ai/gen_ai__operation__name.json GEN_AI_OPERATION_NAME: Literal["gen_ai.operation.name"] = "gen_ai.operation.name" """The name of the operation being performed. @@ -1305,6 +1320,16 @@ class ATTRIBUTE_NAMES(metaclass=_AttributeNamesMeta): Example: "tool" """ + # Path: model/attributes/gen_ai/gen_ai__output__messages.json + GEN_AI_OUTPUT_MESSAGES: Literal["gen_ai.output.messages"] = "gen_ai.output.messages" + """The model's response messages. It has to be a stringified version of an array of message objects, which can include text responses and tool calls. + + Type: str + Contains PII: maybe + Defined in OTEL: Yes + Example: "[{\"role\": \"assistant\", \"parts\": [{\"type\": \"text\", \"content\": \"The weather in Paris is currently rainy with a temperature of 57°F.\"}], \"finish_reason\": \"stop\"}]" + """ + # Path: model/attributes/gen_ai/gen_ai__pipeline__name.json GEN_AI_PIPELINE_NAME: Literal["gen_ai.pipeline.name"] = "gen_ai.pipeline.name" """Name of the AI pipeline or chain being executed. 
@@ -1336,6 +1361,7 @@ class ATTRIBUTE_NAMES(metaclass=_AttributeNamesMeta): Type: str Contains PII: maybe Defined in OTEL: No + DEPRECATED: Use gen_ai.tool.definitions instead Example: "[{\"name\": \"get_weather\", \"description\": \"Get the weather for a given location\"}, {\"name\": \"get_news\", \"description\": \"Get the news for a given topic\"}]" """ @@ -1374,6 +1400,7 @@ class ATTRIBUTE_NAMES(metaclass=_AttributeNamesMeta): Contains PII: maybe Defined in OTEL: No Aliases: ai.input_messages + DEPRECATED: Use gen_ai.input.messages instead Example: "[{\"role\": \"system\", \"content\": \"Generate a random number.\"}, {\"role\": \"user\", \"content\": [{\"text\": \"Generate a random number between 0 and 10.\", \"type\": \"text\"}]}, {\"role\": \"tool\", \"content\": {\"toolCallId\": \"1\", \"toolName\": \"Weather\", \"output\": \"rainy\"}}]" """ @@ -1501,6 +1528,7 @@ class ATTRIBUTE_NAMES(metaclass=_AttributeNamesMeta): Type: str Contains PII: maybe Defined in OTEL: No + DEPRECATED: Use gen_ai.output.messages instead Example: "[\"The weather in Paris is rainy and overcast, with temperatures around 57°F\", \"The weather in London is sunny and warm, with temperatures around 65°F\"]" """ @@ -1525,6 +1553,7 @@ class ATTRIBUTE_NAMES(metaclass=_AttributeNamesMeta): Type: str Contains PII: maybe Defined in OTEL: No + DEPRECATED: Use gen_ai.output.messages instead Example: "[{\"name\": \"get_weather\", \"arguments\": {\"location\": \"Paris\"}}]" """ @@ -1546,9 +1575,58 @@ class ATTRIBUTE_NAMES(metaclass=_AttributeNamesMeta): Type: str Contains PII: true Defined in OTEL: No + DEPRECATED: Use gen_ai.system_instructions instead Example: "You are a helpful assistant" """ + # Path: model/attributes/gen_ai/gen_ai__system_instructions.json + GEN_AI_SYSTEM_INSTRUCTIONS: Literal["gen_ai.system_instructions"] = ( + "gen_ai.system_instructions" + ) + """The system instructions passed to the model. 
+ + Type: str + Contains PII: maybe + Defined in OTEL: Yes + Example: "You are a helpful assistant" + """ + + # Path: model/attributes/gen_ai/gen_ai__tool__call__arguments.json + GEN_AI_TOOL_CALL_ARGUMENTS: Literal["gen_ai.tool.call.arguments"] = ( + "gen_ai.tool.call.arguments" + ) + """The arguments of the tool call. It has to be a stringified version of the arguments to the tool. + + Type: str + Contains PII: maybe + Defined in OTEL: Yes + Example: "{\"location\": \"Paris\"}" + """ + + # Path: model/attributes/gen_ai/gen_ai__tool__call__result.json + GEN_AI_TOOL_CALL_RESULT: Literal["gen_ai.tool.call.result"] = ( + "gen_ai.tool.call.result" + ) + """The result of the tool call. It has to be a stringified version of the result of the tool. + + Type: str + Contains PII: maybe + Defined in OTEL: Yes + Example: "rainy, 57°F" + """ + + # Path: model/attributes/gen_ai/gen_ai__tool__definitions.json + GEN_AI_TOOL_DEFINITIONS: Literal["gen_ai.tool.definitions"] = ( + "gen_ai.tool.definitions" + ) + """The list of source system tool definitions available to the GenAI agent or model. + + Type: str + Contains PII: maybe + Defined in OTEL: Yes + Example: "[{\"type\": \"function\", \"name\": \"get_current_weather\", \"description\": \"Get the current weather in a given location\", \"parameters\": {\"type\": \"object\", \"properties\": {\"location\": {\"type\": \"string\", \"description\": \"The city and state, e.g. San Francisco, CA\"}, \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]}}, \"required\": [\"location\", \"unit\"]}}]" + """ + # Path: model/attributes/gen_ai/gen_ai__tool__description.json GEN_AI_TOOL_DESCRIPTION: Literal["gen_ai.tool.description"] = ( "gen_ai.tool.description" @@ -5550,6 +5628,13 @@ class ATTRIBUTE_NAMES(metaclass=_AttributeNamesMeta): is_in_otel=False, example="What's the weather in Paris?", ), + "gen_ai.input.messages": AttributeMetadata( + brief='The messages passed to the model. 
It has to be a stringified version of an array of objects. The `role` attribute of each object must be `"user"`, `"assistant"`, `"tool"`, or `"system"`. For messages of the role `"tool"`, the `content` can be a string or an arbitrary object with information about the tool call. For other messages the `content` can be either a string or a list of objects in the format `{type: "text", text:"..."}`.', + type=AttributeType.STRING, + pii=PiiInfo(isPii=IsPii.MAYBE), + is_in_otel=True, + example='[{"role": "user", "parts": [{"type": "text", "content": "Weather in Paris?"}]}, {"role": "assistant", "parts": [{"type": "tool_call", "id": "call_VSPygqKTWdrhaFErNvMV18Yl", "name": "get_weather", "arguments": {"location": "Paris"}}]}, {"role": "tool", "parts": [{"type": "tool_call_response", "id": "call_VSPygqKTWdrhaFErNvMV18Yl", "result": "rainy, 57°F"}]}]', + ), "gen_ai.operation.name": AttributeMetadata( brief="The name of the operation being performed.", type=AttributeType.STRING, @@ -5564,6 +5649,13 @@ class ATTRIBUTE_NAMES(metaclass=_AttributeNamesMeta): is_in_otel=False, example="tool", ), + "gen_ai.output.messages": AttributeMetadata( + brief="The model's response messages. 
It has to be a stringified version of an array of message objects, which can include text responses and tool calls.", + type=AttributeType.STRING, + pii=PiiInfo(isPii=IsPii.MAYBE), + is_in_otel=True, + example='[{"role": "assistant", "parts": [{"type": "text", "content": "The weather in Paris is currently rainy with a temperature of 57°F."}], "finish_reason": "stop"}]', + ), "gen_ai.pipeline.name": AttributeMetadata( brief="Name of the AI pipeline or chain being executed.", type=AttributeType.STRING, @@ -5588,6 +5680,7 @@ class ATTRIBUTE_NAMES(metaclass=_AttributeNamesMeta): pii=PiiInfo(isPii=IsPii.MAYBE), is_in_otel=False, example='[{"name": "get_weather", "description": "Get the weather for a given location"}, {"name": "get_news", "description": "Get the news for a given topic"}]', + deprecation=DeprecationInfo(replacement="gen_ai.tool.definitions"), ), "gen_ai.request.frequency_penalty": AttributeMetadata( brief="Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.", @@ -5610,6 +5703,7 @@ class ATTRIBUTE_NAMES(metaclass=_AttributeNamesMeta): pii=PiiInfo(isPii=IsPii.MAYBE), is_in_otel=False, example='[{"role": "system", "content": "Generate a random number."}, {"role": "user", "content": [{"text": "Generate a random number between 0 and 10.", "type": "text"}]}, {"role": "tool", "content": {"toolCallId": "1", "toolName": "Weather", "output": "rainy"}}]', + deprecation=DeprecationInfo(replacement="gen_ai.input.messages"), aliases=["ai.input_messages"], ), "gen_ai.request.model": AttributeMetadata( @@ -5697,6 +5791,7 @@ class ATTRIBUTE_NAMES(metaclass=_AttributeNamesMeta): pii=PiiInfo(isPii=IsPii.MAYBE), is_in_otel=False, example='["The weather in Paris is rainy and overcast, with temperatures around 57°F", "The weather in London is sunny and warm, with temperatures around 65°F"]', + 
deprecation=DeprecationInfo(replacement="gen_ai.output.messages"), ), "gen_ai.response.tokens_per_second": AttributeMetadata( brief="The total output tokens per seconds throughput", @@ -5711,6 +5806,7 @@ class ATTRIBUTE_NAMES(metaclass=_AttributeNamesMeta): pii=PiiInfo(isPii=IsPii.MAYBE), is_in_otel=False, example='[{"name": "get_weather", "arguments": {"location": "Paris"}}]', + deprecation=DeprecationInfo(replacement="gen_ai.output.messages"), ), "gen_ai.system": AttributeMetadata( brief="The provider of the model.", @@ -5726,6 +5822,35 @@ class ATTRIBUTE_NAMES(metaclass=_AttributeNamesMeta): pii=PiiInfo(isPii=IsPii.TRUE), is_in_otel=False, example="You are a helpful assistant", + deprecation=DeprecationInfo(replacement="gen_ai.system_instructions"), + ), + "gen_ai.system_instructions": AttributeMetadata( + brief="The system instructions passed to the model.", + type=AttributeType.STRING, + pii=PiiInfo(isPii=IsPii.MAYBE), + is_in_otel=True, + example="You are a helpful assistant", + ), + "gen_ai.tool.call.arguments": AttributeMetadata( + brief="The arguments of the tool call. It has to be a stringified version of the arguments to the tool.", + type=AttributeType.STRING, + pii=PiiInfo(isPii=IsPii.MAYBE), + is_in_otel=True, + example='{"location": "Paris"}', + ), + "gen_ai.tool.call.result": AttributeMetadata( + brief="The result of the tool call. 
It has to be a stringified version of the result of the tool.", + type=AttributeType.STRING, + pii=PiiInfo(isPii=IsPii.MAYBE), + is_in_otel=True, + example="rainy, 57°F", + ), + "gen_ai.tool.definitions": AttributeMetadata( + brief="The list of source system tool definitions available to the GenAI agent or model.", + type=AttributeType.STRING, + pii=PiiInfo(isPii=IsPii.MAYBE), + is_in_otel=True, + example='[{"type": "function", "name": "get_current_weather", "description": "Get the current weather in a given location", "parameters": {"type": "object", "properties": {"location": {"type": "string", "description": "The city and state, e.g. San Francisco, CA"}, "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}}, "required": ["location", "unit"]}}]', ), "gen_ai.tool.description": AttributeMetadata( brief="The description of the tool being used.", @@ -8075,8 +8200,10 @@ class ATTRIBUTE_NAMES(metaclass=_AttributeNamesMeta): "gen_ai.cost.output_tokens": float, "gen_ai.cost.total_tokens": float, "gen_ai.embeddings.input": str, + "gen_ai.input.messages": str, "gen_ai.operation.name": str, "gen_ai.operation.type": str, + "gen_ai.output.messages": str, "gen_ai.pipeline.name": str, "gen_ai.prompt": str, "gen_ai.request.available_tools": str, @@ -8098,6 +8225,10 @@ class ATTRIBUTE_NAMES(metaclass=_AttributeNamesMeta): "gen_ai.response.tool_calls": str, "gen_ai.system": str, "gen_ai.system.message": str, + "gen_ai.system_instructions": str, + "gen_ai.tool.call.arguments": str, + "gen_ai.tool.call.result": str, + "gen_ai.tool.definitions": str, "gen_ai.tool.description": str, "gen_ai.tool.input": str, "gen_ai.tool.message": str, diff --git a/shared/deprecated_attributes.json b/shared/deprecated_attributes.json index e0bd97d9..14475f02 100644 --- a/shared/deprecated_attributes.json +++ b/shared/deprecated_attributes.json @@ -594,6 +594,80 @@ "reason": "Deprecated from OTEL, use gen_ai.input.messages with the new format instead." 
} }, + { + "key": "gen_ai.request.available_tools", + "brief": "The available tools for the model. It has to be a stringified version of an array of objects.", + "type": "string", + "pii": { + "key": "maybe" + }, + "is_in_otel": false, + "example": "[{\"name\": \"get_weather\", \"description\": \"Get the weather for a given location\"}, {\"name\": \"get_news\", \"description\": \"Get the news for a given topic\"}]", + "alias": [], + "deprecation": { + "_status": null, + "replacement": "gen_ai.tool.definitions" + } + }, + { + "key": "gen_ai.request.messages", + "brief": "The messages passed to the model. It has to be a stringified version of an array of objects. The `role` attribute of each object must be `\"user\"`, `\"assistant\"`, `\"tool\"`, or `\"system\"`. For messages of the role `\"tool\"`, the `content` can be a string or an arbitrary object with information about the tool call. For other messages the `content` can be either a string or a list of objects in the format `{type: \"text\", text:\"...\"}`.", + "type": "string", + "pii": { + "key": "maybe" + }, + "is_in_otel": false, + "example": "[{\"role\": \"system\", \"content\": \"Generate a random number.\"}, {\"role\": \"user\", \"content\": [{\"text\": \"Generate a random number between 0 and 10.\", \"type\": \"text\"}]}, {\"role\": \"tool\", \"content\": {\"toolCallId\": \"1\", \"toolName\": \"Weather\", \"output\": \"rainy\"}}]", + "alias": ["ai.input_messages"], + "deprecation": { + "_status": null, + "replacement": "gen_ai.input.messages" + } + }, + { + "key": "gen_ai.response.text", + "brief": "The model's response text messages. 
It has to be a stringified version of an array of response text messages.", + "type": "string", + "pii": { + "key": "maybe" + }, + "is_in_otel": false, + "example": "[\"The weather in Paris is rainy and overcast, with temperatures around 57°F\", \"The weather in London is sunny and warm, with temperatures around 65°F\"]", + "alias": [], + "deprecation": { + "_status": null, + "replacement": "gen_ai.output.messages" + } + }, + { + "key": "gen_ai.response.tool_calls", + "brief": "The tool calls in the model's response. It has to be a stringified version of an array of objects.", + "type": "string", + "pii": { + "key": "maybe" + }, + "is_in_otel": false, + "example": "[{\"name\": \"get_weather\", \"arguments\": {\"location\": \"Paris\"}}]", + "alias": [], + "deprecation": { + "_status": null, + "replacement": "gen_ai.output.messages" + } + }, + { + "key": "gen_ai.system.message", + "brief": "The system instructions passed to the model.", + "type": "string", + "pii": { + "key": "true" + }, + "is_in_otel": false, + "example": "You are a helpful assistant", + "deprecation": { + "_status": null, + "replacement": "gen_ai.system_instructions" + } + }, { "key": "gen_ai.usage.completion_tokens", "brief": "The number of tokens used in the GenAI response (completion).",