diff --git a/dev-packages/e2e-tests/test-applications/nextjs-15/tests/ai-test.test.ts b/dev-packages/e2e-tests/test-applications/nextjs-15/tests/ai-test.test.ts
index c63716a34fad..9fd05f83c5f9 100644
--- a/dev-packages/e2e-tests/test-applications/nextjs-15/tests/ai-test.test.ts
+++ b/dev-packages/e2e-tests/test-applications/nextjs-15/tests/ai-test.test.ts
@@ -31,34 +31,34 @@ test('should create AI spans with correct attributes', async ({ page }) => {
 
   // First AI call - should have telemetry enabled and record inputs/outputs (sendDefaultPii: true)
   /* const firstPipelineSpan = aiPipelineSpans[0];
-  expect(firstPipelineSpan?.data?.['ai.model.id']).toBe('mock-model-id');
-  expect(firstPipelineSpan?.data?.['ai.model.provider']).toBe('mock-provider');
-  expect(firstPipelineSpan?.data?.['ai.prompt']).toContain('Where is the first span?');
+  expect(firstPipelineSpan?.data?.['vercel.ai.model.id']).toBe('mock-model-id');
+  expect(firstPipelineSpan?.data?.['vercel.ai.model.provider']).toBe('mock-provider');
+  expect(firstPipelineSpan?.data?.['vercel.ai.prompt']).toContain('Where is the first span?');
   expect(firstPipelineSpan?.data?.['gen_ai.response.text']).toBe('First span here!');
   expect(firstPipelineSpan?.data?.['gen_ai.usage.input_tokens']).toBe(10);
   expect(firstPipelineSpan?.data?.['gen_ai.usage.output_tokens']).toBe(20); */
 
   // Second AI call - explicitly enabled telemetry
   const secondPipelineSpan = aiPipelineSpans[0];
-  expect(secondPipelineSpan?.data?.['ai.prompt']).toContain('Where is the second span?');
+  expect(secondPipelineSpan?.data?.['vercel.ai.prompt']).toContain('Where is the second span?');
   expect(secondPipelineSpan?.data?.['gen_ai.response.text']).toContain('Second span here!');
 
   // Third AI call - with tool calls
   /* const thirdPipelineSpan = aiPipelineSpans[2];
-  expect(thirdPipelineSpan?.data?.['ai.response.finishReason']).toBe('tool-calls');
+  expect(thirdPipelineSpan?.data?.['vercel.ai.response.finishReason']).toBe('tool-calls');
   expect(thirdPipelineSpan?.data?.['gen_ai.usage.input_tokens']).toBe(15);
   expect(thirdPipelineSpan?.data?.['gen_ai.usage.output_tokens']).toBe(25); */
 
   // Tool call span
   /* const toolSpan = toolCallSpans[0];
-  expect(toolSpan?.data?.['ai.toolCall.name']).toBe('getWeather');
-  expect(toolSpan?.data?.['ai.toolCall.id']).toBe('call-1');
-  expect(toolSpan?.data?.['ai.toolCall.args']).toContain('San Francisco');
-  expect(toolSpan?.data?.['ai.toolCall.result']).toContain('Sunny, 72°F'); */
+  expect(toolSpan?.data?.['vercel.ai.toolCall.name']).toBe('getWeather');
+  expect(toolSpan?.data?.['vercel.ai.toolCall.id']).toBe('call-1');
+  expect(toolSpan?.data?.['vercel.ai.toolCall.args']).toContain('San Francisco');
+  expect(toolSpan?.data?.['vercel.ai.toolCall.result']).toContain('Sunny, 72°F'); */
 
   // Verify the fourth call was not captured (telemetry disabled)
   const promptsInSpans = spans
-    .map(span => span.data?.['ai.prompt'])
+    .map(span => span.data?.['vercel.ai.prompt'])
    .filter((prompt): prompt is string => prompt !== undefined);
   const hasDisabledPrompt = promptsInSpans.some(prompt => prompt.includes('Where is the third span?'));
   expect(hasDisabledPrompt).toBe(false);
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
index 3566d40322de..f9b853aa4946 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
@@ -13,14 +13,14 @@ describe('Vercel AI integration', () => {
       // First span - no telemetry config, should enable telemetry but not record inputs/outputs when sendDefaultPii: false
       expect.objectContaining({
         data: {
-          'ai.model.id': 'mock-model-id',
-          'ai.model.provider': 'mock-provider',
-          'ai.operationId': 'ai.generateText',
-          'ai.pipeline.name': 'generateText',
-          'ai.response.finishReason': 'stop',
-          'ai.settings.maxRetries': 2,
-          'ai.settings.maxSteps': 1,
-          'ai.streaming': false,
+          'vercel.ai.model.id': 'mock-model-id',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.operationId': 'ai.generateText',
+          'vercel.ai.pipeline.name': 'generateText',
+          'vercel.ai.response.finishReason': 'stop',
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.settings.maxSteps': 1,
+          'vercel.ai.streaming': false,
           'gen_ai.response.model': 'mock-model-id',
           'gen_ai.usage.input_tokens': 10,
           'gen_ai.usage.output_tokens': 20,
@@ -40,18 +40,18 @@ describe('Vercel AI integration', () => {
           'sentry.origin': 'auto.vercelai.otel',
           'sentry.op': 'gen_ai.generate_text',
           'operation.name': 'ai.generateText.doGenerate',
-          'ai.operationId': 'ai.generateText.doGenerate',
-          'ai.model.provider': 'mock-provider',
-          'ai.model.id': 'mock-model-id',
-          'ai.settings.maxRetries': 2,
+          'vercel.ai.operationId': 'ai.generateText.doGenerate',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.model.id': 'mock-model-id',
+          'vercel.ai.settings.maxRetries': 2,
           'gen_ai.system': 'mock-provider',
           'gen_ai.request.model': 'mock-model-id',
-          'ai.pipeline.name': 'generateText.doGenerate',
-          'ai.streaming': false,
-          'ai.response.finishReason': 'stop',
-          'ai.response.model': 'mock-model-id',
-          'ai.response.id': expect.any(String),
-          'ai.response.timestamp': expect.any(String),
+          'vercel.ai.pipeline.name': 'generateText.doGenerate',
+          'vercel.ai.streaming': false,
+          'vercel.ai.response.finishReason': 'stop',
+          'vercel.ai.response.model': 'mock-model-id',
+          'vercel.ai.response.id': expect.any(String),
+          'vercel.ai.response.timestamp': expect.any(String),
           'gen_ai.response.finish_reasons': ['stop'],
           'gen_ai.usage.input_tokens': 10,
           'gen_ai.usage.output_tokens': 20,
@@ -67,16 +67,16 @@ describe('Vercel AI integration', () => {
       // Third span - explicit telemetry enabled, should record inputs/outputs regardless of sendDefaultPii
       expect.objectContaining({
         data: {
-          'ai.model.id': 'mock-model-id',
-          'ai.model.provider': 'mock-provider',
-          'ai.operationId': 'ai.generateText',
-          'ai.pipeline.name': 'generateText',
-          'ai.prompt': '{"prompt":"Where is the second span?"}',
-          'ai.response.finishReason': 'stop',
+          'vercel.ai.model.id': 'mock-model-id',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.operationId': 'ai.generateText',
+          'vercel.ai.pipeline.name': 'generateText',
+          'vercel.ai.prompt': '{"prompt":"Where is the second span?"}',
+          'vercel.ai.response.finishReason': 'stop',
           'gen_ai.response.text': expect.any(String),
-          'ai.settings.maxRetries': 2,
-          'ai.settings.maxSteps': 1,
-          'ai.streaming': false,
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.settings.maxSteps': 1,
+          'vercel.ai.streaming': false,
           'gen_ai.prompt': '{"prompt":"Where is the second span?"}',
           'gen_ai.response.model': 'mock-model-id',
           'gen_ai.usage.input_tokens': 10,
@@ -97,20 +97,20 @@ describe('Vercel AI integration', () => {
           'sentry.origin': 'auto.vercelai.otel',
           'sentry.op': 'gen_ai.generate_text',
           'operation.name': 'ai.generateText.doGenerate',
-          'ai.operationId': 'ai.generateText.doGenerate',
-          'ai.model.provider': 'mock-provider',
-          'ai.model.id': 'mock-model-id',
-          'ai.settings.maxRetries': 2,
+          'vercel.ai.operationId': 'ai.generateText.doGenerate',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.model.id': 'mock-model-id',
+          'vercel.ai.settings.maxRetries': 2,
           'gen_ai.system': 'mock-provider',
           'gen_ai.request.model': 'mock-model-id',
-          'ai.pipeline.name': 'generateText.doGenerate',
-          'ai.streaming': false,
-          'ai.response.finishReason': 'stop',
-          'ai.response.model': 'mock-model-id',
-          'ai.response.id': expect.any(String),
+          'vercel.ai.pipeline.name': 'generateText.doGenerate',
+          'vercel.ai.streaming': false,
+          'vercel.ai.response.finishReason': 'stop',
+          'vercel.ai.response.model': 'mock-model-id',
+          'vercel.ai.response.id': expect.any(String),
           'gen_ai.response.text': expect.any(String),
-          'ai.response.timestamp': expect.any(String),
-          'ai.prompt.format': expect.any(String),
+          'vercel.ai.response.timestamp': expect.any(String),
+          'vercel.ai.prompt.format': expect.any(String),
           'gen_ai.request.messages': expect.any(String),
           'gen_ai.response.finish_reasons': ['stop'],
           'gen_ai.usage.input_tokens': 10,
@@ -127,14 +127,14 @@ describe('Vercel AI integration', () => {
       // Fifth span - tool call generateText span
       expect.objectContaining({
         data: {
-          'ai.model.id': 'mock-model-id',
-          'ai.model.provider': 'mock-provider',
-          'ai.operationId': 'ai.generateText',
-          'ai.pipeline.name': 'generateText',
-          'ai.response.finishReason': 'tool-calls',
-          'ai.settings.maxRetries': 2,
-          'ai.settings.maxSteps': 1,
-          'ai.streaming': false,
+          'vercel.ai.model.id': 'mock-model-id',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.operationId': 'ai.generateText',
+          'vercel.ai.pipeline.name': 'generateText',
+          'vercel.ai.response.finishReason': 'tool-calls',
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.settings.maxSteps': 1,
+          'vercel.ai.streaming': false,
           'gen_ai.response.model': 'mock-model-id',
           'gen_ai.usage.input_tokens': 15,
           'gen_ai.usage.output_tokens': 25,
@@ -151,16 +151,16 @@ describe('Vercel AI integration', () => {
       // Sixth span - tool call doGenerate span
       expect.objectContaining({
         data: {
-          'ai.model.id': 'mock-model-id',
-          'ai.model.provider': 'mock-provider',
-          'ai.operationId': 'ai.generateText.doGenerate',
-          'ai.pipeline.name': 'generateText.doGenerate',
-          'ai.response.finishReason': 'tool-calls',
-          'ai.response.id': expect.any(String),
-          'ai.response.model': 'mock-model-id',
-          'ai.response.timestamp': expect.any(String),
-          'ai.settings.maxRetries': 2,
-          'ai.streaming': false,
+          'vercel.ai.model.id': 'mock-model-id',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.operationId': 'ai.generateText.doGenerate',
+          'vercel.ai.pipeline.name': 'generateText.doGenerate',
+          'vercel.ai.response.finishReason': 'tool-calls',
+          'vercel.ai.response.id': expect.any(String),
+          'vercel.ai.response.model': 'mock-model-id',
+          'vercel.ai.response.timestamp': expect.any(String),
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.streaming': false,
           'gen_ai.request.model': 'mock-model-id',
           'gen_ai.response.finish_reasons': ['tool-calls'],
           'gen_ai.response.id': expect.any(String),
@@ -181,7 +181,7 @@ describe('Vercel AI integration', () => {
       // Seventh span - tool call execution span
       expect.objectContaining({
         data: {
-          'ai.operationId': 'ai.toolCall',
+          'vercel.ai.operationId': 'ai.toolCall',
           'gen_ai.tool.call.id': 'call-1',
           'gen_ai.tool.name': 'getWeather',
           'gen_ai.tool.type': 'function',
@@ -203,16 +203,16 @@ describe('Vercel AI integration', () => {
       // First span - no telemetry config, should enable telemetry AND record inputs/outputs when sendDefaultPii: true
       expect.objectContaining({
         data: {
-          'ai.model.id': 'mock-model-id',
-          'ai.model.provider': 'mock-provider',
-          'ai.operationId': 'ai.generateText',
-          'ai.pipeline.name': 'generateText',
-          'ai.prompt': '{"prompt":"Where is the first span?"}',
-          'ai.response.finishReason': 'stop',
+          'vercel.ai.model.id': 'mock-model-id',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.operationId': 'ai.generateText',
+          'vercel.ai.pipeline.name': 'generateText',
+          'vercel.ai.prompt': '{"prompt":"Where is the first span?"}',
+          'vercel.ai.response.finishReason': 'stop',
           'gen_ai.response.text': 'First span here!',
-          'ai.settings.maxRetries': 2,
-          'ai.settings.maxSteps': 1,
-          'ai.streaming': false,
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.settings.maxSteps': 1,
+          'vercel.ai.streaming': false,
           'gen_ai.prompt': '{"prompt":"Where is the first span?"}',
           'gen_ai.response.model': 'mock-model-id',
           'gen_ai.usage.input_tokens': 10,
@@ -230,19 +230,19 @@ describe('Vercel AI integration', () => {
       // Second span - doGenerate for first call, should also include input/output fields when sendDefaultPii: true
       expect.objectContaining({
         data: {
-          'ai.model.id': 'mock-model-id',
-          'ai.model.provider': 'mock-provider',
-          'ai.operationId': 'ai.generateText.doGenerate',
-          'ai.pipeline.name': 'generateText.doGenerate',
-          'ai.prompt.format': 'prompt',
+          'vercel.ai.model.id': 'mock-model-id',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.operationId': 'ai.generateText.doGenerate',
+          'vercel.ai.pipeline.name': 'generateText.doGenerate',
+          'vercel.ai.prompt.format': 'prompt',
           'gen_ai.request.messages': '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]',
-          'ai.response.finishReason': 'stop',
-          'ai.response.id': expect.any(String),
-          'ai.response.model': 'mock-model-id',
+          'vercel.ai.response.finishReason': 'stop',
+          'vercel.ai.response.id': expect.any(String),
+          'vercel.ai.response.model': 'mock-model-id',
           'gen_ai.response.text': 'First span here!',
-          'ai.response.timestamp': expect.any(String),
-          'ai.settings.maxRetries': 2,
-          'ai.streaming': false,
+          'vercel.ai.response.timestamp': expect.any(String),
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.streaming': false,
           'gen_ai.request.model': 'mock-model-id',
           'gen_ai.response.finish_reasons': ['stop'],
           'gen_ai.response.id': expect.any(String),
@@ -263,16 +263,16 @@ describe('Vercel AI integration', () => {
       // Third span - explicitly enabled telemetry, should record inputs/outputs regardless of sendDefaultPii
       expect.objectContaining({
         data: {
-          'ai.model.id': 'mock-model-id',
-          'ai.model.provider': 'mock-provider',
-          'ai.operationId': 'ai.generateText',
-          'ai.pipeline.name': 'generateText',
-          'ai.prompt': '{"prompt":"Where is the second span?"}',
-          'ai.response.finishReason': 'stop',
+          'vercel.ai.model.id': 'mock-model-id',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.operationId': 'ai.generateText',
+          'vercel.ai.pipeline.name': 'generateText',
+          'vercel.ai.prompt': '{"prompt":"Where is the second span?"}',
+          'vercel.ai.response.finishReason': 'stop',
           'gen_ai.response.text': expect.any(String),
-          'ai.settings.maxRetries': 2,
-          'ai.settings.maxSteps': 1,
-          'ai.streaming': false,
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.settings.maxSteps': 1,
+          'vercel.ai.streaming': false,
           'gen_ai.prompt': '{"prompt":"Where is the second span?"}',
           'gen_ai.response.model': 'mock-model-id',
           'gen_ai.usage.input_tokens': 10,
@@ -293,20 +293,20 @@ describe('Vercel AI integration', () => {
           'sentry.origin': 'auto.vercelai.otel',
           'sentry.op': 'gen_ai.generate_text',
           'operation.name': 'ai.generateText.doGenerate',
-          'ai.operationId': 'ai.generateText.doGenerate',
-          'ai.model.provider': 'mock-provider',
-          'ai.model.id': 'mock-model-id',
-          'ai.settings.maxRetries': 2,
+          'vercel.ai.operationId': 'ai.generateText.doGenerate',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.model.id': 'mock-model-id',
+          'vercel.ai.settings.maxRetries': 2,
           'gen_ai.system': 'mock-provider',
           'gen_ai.request.model': 'mock-model-id',
-          'ai.pipeline.name': 'generateText.doGenerate',
-          'ai.streaming': false,
-          'ai.response.finishReason': 'stop',
-          'ai.response.model': 'mock-model-id',
-          'ai.response.id': expect.any(String),
+          'vercel.ai.pipeline.name': 'generateText.doGenerate',
+          'vercel.ai.streaming': false,
+          'vercel.ai.response.finishReason': 'stop',
+          'vercel.ai.response.model': 'mock-model-id',
+          'vercel.ai.response.id': expect.any(String),
           'gen_ai.response.text': expect.any(String),
-          'ai.response.timestamp': expect.any(String),
-          'ai.prompt.format': expect.any(String),
+          'vercel.ai.response.timestamp': expect.any(String),
+          'vercel.ai.prompt.format': expect.any(String),
           'gen_ai.request.messages': expect.any(String),
           'gen_ai.response.finish_reasons': ['stop'],
           'gen_ai.usage.input_tokens': 10,
@@ -323,17 +323,17 @@ describe('Vercel AI integration', () => {
       // Fifth span - tool call generateText span (should include prompts when sendDefaultPii: true)
       expect.objectContaining({
         data: {
-          'ai.model.id': 'mock-model-id',
-          'ai.model.provider': 'mock-provider',
-          'ai.operationId': 'ai.generateText',
-          'ai.pipeline.name': 'generateText',
-          'ai.prompt': '{"prompt":"What is the weather in San Francisco?"}',
-          'ai.response.finishReason': 'tool-calls',
+          'vercel.ai.model.id': 'mock-model-id',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.operationId': 'ai.generateText',
+          'vercel.ai.pipeline.name': 'generateText',
+          'vercel.ai.prompt': '{"prompt":"What is the weather in San Francisco?"}',
+          'vercel.ai.response.finishReason': 'tool-calls',
           'gen_ai.response.text': 'Tool call completed!',
           'gen_ai.response.tool_calls': expect.any(String),
-          'ai.settings.maxRetries': 2,
-          'ai.settings.maxSteps': 1,
-          'ai.streaming': false,
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.settings.maxSteps': 1,
+          'vercel.ai.streaming': false,
           'gen_ai.prompt': '{"prompt":"What is the weather in San Francisco?"}',
           'gen_ai.response.model': 'mock-model-id',
           'gen_ai.usage.input_tokens': 15,
@@ -351,22 +351,22 @@ describe('Vercel AI integration', () => {
       // Sixth span - tool call doGenerate span (should include prompts when sendDefaultPii: true)
       expect.objectContaining({
         data: {
-          'ai.model.id': 'mock-model-id',
-          'ai.model.provider': 'mock-provider',
-          'ai.operationId': 'ai.generateText.doGenerate',
-          'ai.pipeline.name': 'generateText.doGenerate',
-          'ai.prompt.format': expect.any(String),
+          'vercel.ai.model.id': 'mock-model-id',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.operationId': 'ai.generateText.doGenerate',
+          'vercel.ai.pipeline.name': 'generateText.doGenerate',
+          'vercel.ai.prompt.format': expect.any(String),
           'gen_ai.request.messages': expect.any(String),
-          'ai.prompt.toolChoice': expect.any(String),
+          'vercel.ai.prompt.toolChoice': expect.any(String),
           'gen_ai.request.available_tools': expect.any(Array),
-          'ai.response.finishReason': 'tool-calls',
-          'ai.response.id': expect.any(String),
-          'ai.response.model': 'mock-model-id',
+          'vercel.ai.response.finishReason': 'tool-calls',
+          'vercel.ai.response.id': expect.any(String),
+          'vercel.ai.response.model': 'mock-model-id',
           'gen_ai.response.text': 'Tool call completed!',
-          'ai.response.timestamp': expect.any(String),
+          'vercel.ai.response.timestamp': expect.any(String),
           'gen_ai.response.tool_calls': expect.any(String),
-          'ai.settings.maxRetries': 2,
-          'ai.streaming': false,
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.streaming': false,
           'gen_ai.request.model': 'mock-model-id',
           'gen_ai.response.finish_reasons': ['tool-calls'],
           'gen_ai.response.id': expect.any(String),
@@ -387,7 +387,7 @@ describe('Vercel AI integration', () => {
       // Seventh span - tool call execution span
       expect.objectContaining({
         data: {
-          'ai.operationId': 'ai.toolCall',
+          'vercel.ai.operationId': 'ai.toolCall',
           'gen_ai.tool.call.id': 'call-1',
           'gen_ai.tool.name': 'getWeather',
           'gen_ai.tool.input': expect.any(String),
diff --git a/packages/core/src/utils/vercel-ai.ts b/packages/core/src/utils/vercel-ai.ts
index 401c295c97c9..4717e2cf87c7 100644
--- a/packages/core/src/utils/vercel-ai.ts
+++ b/packages/core/src/utils/vercel-ai.ts
@@ -99,6 +99,13 @@ function processEndedVercelAiSpan(span: SpanJSON): void {
 
   renameAttributeKey(attributes, AI_TOOL_CALL_ARGS_ATTRIBUTE, 'gen_ai.tool.input');
   renameAttributeKey(attributes, AI_TOOL_CALL_RESULT_ATTRIBUTE, 'gen_ai.tool.output');
+
+  // Change attributes namespaced with `ai.X` to `vercel.ai.X`
+  for (const key of Object.keys(attributes)) {
+    if (key.startsWith('ai.')) {
+      renameAttributeKey(attributes, key, `vercel.${key}`);
+    }
+  }
 }
 
 /**
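
For reference, the core change above adds a catch-all pass to `processEndedVercelAiSpan`: because the loop runs after the targeted renames (such as `AI_TOOL_CALL_ARGS_ATTRIBUTE` to `gen_ai.tool.input`), only keys still left in the `ai.` namespace receive the `vercel.` prefix, while `gen_ai.*` keys are untouched. The sketch below illustrates that behavior in isolation; the body of `renameAttributeKey` is an assumption here (this diff shows only its call sites), modeled as "copy the value to the new key, then delete the old one".

```ts
type SpanAttributes = Record<string, unknown>;

// Assumed shape of the @sentry/core helper: move a value to a new key and
// drop the old one. Only its call sites appear in the patch above.
function renameAttributeKey(attributes: SpanAttributes, oldKey: string, newKey: string): void {
  if (attributes[oldKey] !== undefined) {
    attributes[newKey] = attributes[oldKey];
    delete attributes[oldKey];
  }
}

const attributes: SpanAttributes = {
  'ai.model.id': 'mock-model-id',
  'ai.streaming': false,
  'gen_ai.usage.input_tokens': 10, // already gen_ai-namespaced, so left as-is
};

// The loop added in the patch: prefix every remaining `ai.*` key with `vercel.`.
// Object.keys() snapshots the key list, so mutating `attributes` mid-loop is safe.
for (const key of Object.keys(attributes)) {
  if (key.startsWith('ai.')) {
    renameAttributeKey(attributes, key, `vercel.${key}`);
  }
}

console.log(attributes);
// { 'gen_ai.usage.input_tokens': 10, 'vercel.ai.model.id': 'mock-model-id', 'vercel.ai.streaming': false }
```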