diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts index 720345cc7d86..94fd0dde8486 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts @@ -197,73 +197,6 @@ describe('Vercel AI integration', () => { ]), }; - // Todo: Add missing attribute spans for v5 - // Right now only second span is recorded as it's manually opted in via explicit telemetry option - const EXPECTED_TRANSACTION_DEFAULT_PII_FALSE_V5 = { - transaction: 'main', - spans: expect.arrayContaining([ - expect.objectContaining({ - data: { - 'vercel.ai.model.id': 'mock-model-id', - 'vercel.ai.model.provider': 'mock-provider', - 'vercel.ai.operationId': 'ai.generateText', - 'vercel.ai.pipeline.name': 'generateText', - 'vercel.ai.prompt': '{"prompt":"Where is the second span?"}', - 'vercel.ai.response.finishReason': 'stop', - 'gen_ai.response.text': expect.any(String), - 'vercel.ai.settings.maxRetries': 2, - // 'vercel.ai.settings.maxSteps': 1, - 'vercel.ai.streaming': false, - 'gen_ai.prompt': '{"prompt":"Where is the second span?"}', - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.usage.total_tokens': 30, - 'operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', - }, - description: 'generateText', - op: 'gen_ai.invoke_agent', - origin: 'auto.vercelai.otel', - status: 'ok', - }), - // doGenerate - expect.objectContaining({ - data: { - 'sentry.origin': 'auto.vercelai.otel', - 'sentry.op': 'gen_ai.generate_text', - 'operation.name': 'ai.generateText.doGenerate', - 'vercel.ai.operationId': 'ai.generateText.doGenerate', - 'vercel.ai.model.provider': 'mock-provider', - 'vercel.ai.model.id': 'mock-model-id', - 'vercel.ai.settings.maxRetries': 2, - 'gen_ai.system': 'mock-provider', - 'gen_ai.request.model': 'mock-model-id', - 'vercel.ai.pipeline.name': 'generateText.doGenerate', - 'vercel.ai.streaming': false, - 'vercel.ai.response.finishReason': 'stop', - 'vercel.ai.response.model': 'mock-model-id', - 'vercel.ai.response.id': expect.any(String), - 'gen_ai.response.text': 'Second span here!', - 'vercel.ai.response.timestamp': expect.any(String), - // 'vercel.ai.prompt.format': expect.any(String), - 'gen_ai.request.messages': expect.any(String), - 'gen_ai.response.finish_reasons': ['stop'], - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.total_tokens': 30, - }, - description: 'generate_text mock-model-id', - op: 'gen_ai.generate_text', - origin: 'auto.vercelai.otel', - status: 'ok', - }), - ]), - }; - const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE = { transaction: 'main', spans: expect.arrayContaining([ @@ -605,23 +538,6 @@ describe('Vercel AI integration', () => { }); }); - // Test with specific Vercel AI v5 version - createEsmAndCjsTests( - __dirname, - 'scenario-v5.mjs', - 'instrument.mjs', - (createRunner, test) => { - test('creates ai related spans with v5', async () => { - await createRunner().expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_FALSE_V5 }).start().completed(); - }); - }, - { - additionalDependencies: { - ai: '^5.0.0', - }, - }, - ); - createEsmAndCjsTests(__dirname, 'scenario-error-in-tool-express.mjs', 'instrument.mjs', (createRunner, test) => { test('captures 
error in tool in express server', async () => { const expectedTransaction = { diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/instrument-with-pii.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/instrument-with-pii.mjs new file mode 100644 index 000000000000..b798e21228f5 --- /dev/null +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/instrument-with-pii.mjs @@ -0,0 +1,11 @@ +import * as Sentry from '@sentry/node'; +import { loggingTransport } from '@sentry-internal/node-integration-tests'; + +Sentry.init({ + dsn: 'https://public@dsn.ingest.sentry.io/1337', + release: '1.0', + tracesSampleRate: 1.0, + sendDefaultPii: true, + transport: loggingTransport, + integrations: [Sentry.vercelAIIntegration()], +}); diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/instrument.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/instrument.mjs new file mode 100644 index 000000000000..5e898ee1949d --- /dev/null +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/instrument.mjs @@ -0,0 +1,10 @@ +import * as Sentry from '@sentry/node'; +import { loggingTransport } from '@sentry-internal/node-integration-tests'; + +Sentry.init({ + dsn: 'https://public@dsn.ingest.sentry.io/1337', + release: '1.0', + tracesSampleRate: 1.0, + transport: loggingTransport, + integrations: [Sentry.vercelAIIntegration()], +}); diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/scenario-error-in-tool.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/scenario-error-in-tool.mjs new file mode 100644 index 000000000000..9ba3ac4b7d4a --- /dev/null +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/scenario-error-in-tool.mjs @@ -0,0 +1,36 @@ +import * as Sentry from '@sentry/node'; +import { generateText, tool } from 'ai'; +import { MockLanguageModelV2 } from 'ai/test'; +import { z } from 'zod'; + +async function run() { + await Sentry.startSpan({ op: 'function', name: 'main' }, async () => { + await generateText({ + model: new MockLanguageModelV2({ + doGenerate: async () => ({ + finishReason: 'tool-calls', + usage: { inputTokens: 15, outputTokens: 25, totalTokens: 40 }, + content: [ + { + type: 'tool-call', + toolCallId: 'call-1', + toolName: 'getWeather', + input: JSON.stringify({ location: 'San Francisco' }), + }, + ], + }), + }), + tools: { + getWeather: tool({ + inputSchema: z.object({ location: z.string() }), + execute: async () => { + throw new Error('Error in tool'); + }, + }), + }, + prompt: 'What is the weather in San Francisco?', + }); + }); +} + +run(); diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/scenario-v5.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/scenario.mjs similarity index 82% rename from dev-packages/node-integration-tests/suites/tracing/vercelai/scenario-v5.mjs rename to dev-packages/node-integration-tests/suites/tracing/vercelai/v5/scenario.mjs index 8cfe6d64ad05..9ef1b8000741 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/scenario-v5.mjs +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/scenario.mjs @@ -1,5 +1,5 @@ import * as Sentry from '@sentry/node'; -import { generateText } from 'ai'; +import { generateText, tool } from 'ai'; import { MockLanguageModelV2 } from 'ai/test'; import { z } from 'zod'; @@ -35,24 +35,21 @@ async function run() { doGenerate: async () => ({ finishReason: 'tool-calls', usage: { inputTokens: 15, 
outputTokens: 25, totalTokens: 40 }, - content: [{ type: 'text', text: 'Tool call completed!' }], - toolCalls: [ + content: [ { - toolCallType: 'function', + type: 'tool-call', toolCallId: 'call-1', toolName: 'getWeather', - args: '{ "location": "San Francisco" }', + input: JSON.stringify({ location: 'San Francisco' }), }, ], }), }), tools: { - getWeather: { - parameters: z.object({ location: z.string() }), - execute: async args => { - return `Weather in ${args.location}: Sunny, 72°F`; - }, - }, + getWeather: tool({ + inputSchema: z.object({ location: z.string() }), + execute: async ({ location }) => `Weather in ${location}: Sunny, 72°F`, + }), }, prompt: 'What is the weather in San Francisco?', }); diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts new file mode 100644 index 000000000000..3ea923c5c034 --- /dev/null +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts @@ -0,0 +1,567 @@ +import type { Event } from '@sentry/node'; +import { afterAll, describe, expect } from 'vitest'; +import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../../utils/runner'; + +describe('Vercel AI integration (V5)', () => { + afterAll(() => { + cleanupChildProcesses(); + }); + + const EXPECTED_TRANSACTION_DEFAULT_PII_FALSE = { + transaction: 'main', + spans: expect.arrayContaining([ + // First span - no telemetry config, should enable telemetry but not record inputs/outputs when sendDefaultPii: false + expect.objectContaining({ + data: { + 'vercel.ai.model.id': 'mock-model-id', + 'vercel.ai.model.provider': 'mock-provider', + 'vercel.ai.operationId': 'ai.generateText', + 'vercel.ai.pipeline.name': 'generateText', + 'vercel.ai.response.finishReason': 'stop', + 'vercel.ai.settings.maxRetries': 2, + 'vercel.ai.streaming': false, + 'gen_ai.response.model': 'mock-model-id', + 'gen_ai.usage.input_tokens': 10, + 'gen_ai.usage.output_tokens': 20, + 'gen_ai.usage.total_tokens': 30, + 'operation.name': 'ai.generateText', + 'sentry.op': 'gen_ai.invoke_agent', + 'sentry.origin': 'auto.vercelai.otel', + }, + description: 'generateText', + op: 'gen_ai.invoke_agent', + origin: 'auto.vercelai.otel', + status: 'ok', + }), + // Second span - explicitly enabled telemetry but recordInputs/recordOutputs not set, should not record when sendDefaultPii: false + expect.objectContaining({ + data: { + 'sentry.origin': 'auto.vercelai.otel', + 'sentry.op': 'gen_ai.generate_text', + 'operation.name': 'ai.generateText.doGenerate', + 'vercel.ai.operationId': 'ai.generateText.doGenerate', + 'vercel.ai.model.provider': 'mock-provider', + 'vercel.ai.model.id': 'mock-model-id', + 'vercel.ai.settings.maxRetries': 2, + 'gen_ai.system': 'mock-provider', + 'gen_ai.request.model': 'mock-model-id', + 'vercel.ai.pipeline.name': 'generateText.doGenerate', + 'vercel.ai.streaming': false, + 'vercel.ai.response.finishReason': 'stop', + 'vercel.ai.response.model': 'mock-model-id', + 'vercel.ai.response.id': expect.any(String), + 'vercel.ai.response.timestamp': expect.any(String), + 'gen_ai.response.finish_reasons': ['stop'], + 'gen_ai.usage.input_tokens': 10, + 'gen_ai.usage.output_tokens': 20, + 'gen_ai.response.id': expect.any(String), + 'gen_ai.response.model': 'mock-model-id', + 'gen_ai.usage.total_tokens': 30, + }, + description: 'generate_text mock-model-id', + op: 'gen_ai.generate_text', + origin: 'auto.vercelai.otel', + status: 'ok', + }), + // Third span - explicit telemetry enabled, should record 
inputs/outputs regardless of sendDefaultPii + expect.objectContaining({ + data: { + 'vercel.ai.model.id': 'mock-model-id', + 'vercel.ai.model.provider': 'mock-provider', + 'vercel.ai.operationId': 'ai.generateText', + 'vercel.ai.pipeline.name': 'generateText', + 'vercel.ai.prompt': '{"prompt":"Where is the second span?"}', + 'vercel.ai.response.finishReason': 'stop', + 'gen_ai.response.text': expect.any(String), + 'vercel.ai.settings.maxRetries': 2, + 'vercel.ai.streaming': false, + 'gen_ai.prompt': '{"prompt":"Where is the second span?"}', + 'gen_ai.response.model': 'mock-model-id', + 'gen_ai.usage.input_tokens': 10, + 'gen_ai.usage.output_tokens': 20, + 'gen_ai.usage.total_tokens': 30, + 'operation.name': 'ai.generateText', + 'sentry.op': 'gen_ai.invoke_agent', + 'sentry.origin': 'auto.vercelai.otel', + }, + description: 'generateText', + op: 'gen_ai.invoke_agent', + origin: 'auto.vercelai.otel', + status: 'ok', + }), + // Fourth span - doGenerate for explicit telemetry enabled call + expect.objectContaining({ + data: { + 'sentry.origin': 'auto.vercelai.otel', + 'sentry.op': 'gen_ai.generate_text', + 'operation.name': 'ai.generateText.doGenerate', + 'vercel.ai.operationId': 'ai.generateText.doGenerate', + 'vercel.ai.model.provider': 'mock-provider', + 'vercel.ai.model.id': 'mock-model-id', + 'vercel.ai.settings.maxRetries': 2, + 'gen_ai.system': 'mock-provider', + 'gen_ai.request.model': 'mock-model-id', + 'vercel.ai.pipeline.name': 'generateText.doGenerate', + 'vercel.ai.streaming': false, + 'vercel.ai.response.finishReason': 'stop', + 'vercel.ai.response.model': 'mock-model-id', + 'vercel.ai.response.id': expect.any(String), + 'gen_ai.response.text': expect.any(String), + 'vercel.ai.response.timestamp': expect.any(String), + 'gen_ai.request.messages': expect.any(String), + 'gen_ai.response.finish_reasons': ['stop'], + 'gen_ai.usage.input_tokens': 10, + 'gen_ai.usage.output_tokens': 20, + 'gen_ai.response.id': expect.any(String), + 'gen_ai.response.model': 'mock-model-id', + 'gen_ai.usage.total_tokens': 30, + }, + description: 'generate_text mock-model-id', + op: 'gen_ai.generate_text', + origin: 'auto.vercelai.otel', + status: 'ok', + }), + // Fifth span - tool call generateText span + expect.objectContaining({ + data: { + 'vercel.ai.model.id': 'mock-model-id', + 'vercel.ai.model.provider': 'mock-provider', + 'vercel.ai.operationId': 'ai.generateText', + 'vercel.ai.pipeline.name': 'generateText', + 'vercel.ai.response.finishReason': 'tool-calls', + 'vercel.ai.settings.maxRetries': 2, + 'vercel.ai.streaming': false, + 'gen_ai.response.model': 'mock-model-id', + 'gen_ai.usage.input_tokens': 15, + 'gen_ai.usage.output_tokens': 25, + 'gen_ai.usage.total_tokens': 40, + 'operation.name': 'ai.generateText', + 'sentry.op': 'gen_ai.invoke_agent', + 'sentry.origin': 'auto.vercelai.otel', + }, + description: 'generateText', + op: 'gen_ai.invoke_agent', + origin: 'auto.vercelai.otel', + status: 'ok', + }), + // Sixth span - tool call doGenerate span + expect.objectContaining({ + data: { + 'vercel.ai.model.id': 'mock-model-id', + 'vercel.ai.model.provider': 'mock-provider', + 'vercel.ai.operationId': 'ai.generateText.doGenerate', + 'vercel.ai.pipeline.name': 'generateText.doGenerate', + 'vercel.ai.response.finishReason': 'tool-calls', + 'vercel.ai.response.id': expect.any(String), + 'vercel.ai.response.model': 'mock-model-id', + 'vercel.ai.response.timestamp': expect.any(String), + 'vercel.ai.settings.maxRetries': 2, + 'vercel.ai.streaming': false, + 'gen_ai.request.model': 'mock-model-id', + 
'gen_ai.response.finish_reasons': ['tool-calls'], + 'gen_ai.response.id': expect.any(String), + 'gen_ai.response.model': 'mock-model-id', + 'gen_ai.system': 'mock-provider', + 'gen_ai.usage.input_tokens': 15, + 'gen_ai.usage.output_tokens': 25, + 'gen_ai.usage.total_tokens': 40, + 'operation.name': 'ai.generateText.doGenerate', + 'sentry.op': 'gen_ai.generate_text', + 'sentry.origin': 'auto.vercelai.otel', + }, + description: 'generate_text mock-model-id', + op: 'gen_ai.generate_text', + origin: 'auto.vercelai.otel', + status: 'ok', + }), + // Seventh span - tool call execution span + expect.objectContaining({ + data: { + 'vercel.ai.operationId': 'ai.toolCall', + 'gen_ai.tool.call.id': 'call-1', + 'gen_ai.tool.name': 'getWeather', + 'gen_ai.tool.type': 'function', + 'operation.name': 'ai.toolCall', + 'sentry.op': 'gen_ai.execute_tool', + 'sentry.origin': 'auto.vercelai.otel', + }, + description: 'execute_tool getWeather', + op: 'gen_ai.execute_tool', + origin: 'auto.vercelai.otel', + status: 'ok', + }), + ]), + }; + + const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE = { + transaction: 'main', + spans: expect.arrayContaining([ + // First span - no telemetry config, should enable telemetry AND record inputs/outputs when sendDefaultPii: true + expect.objectContaining({ + data: { + 'vercel.ai.model.id': 'mock-model-id', + 'vercel.ai.model.provider': 'mock-provider', + 'vercel.ai.operationId': 'ai.generateText', + 'vercel.ai.pipeline.name': 'generateText', + 'vercel.ai.prompt': '{"prompt":"Where is the first span?"}', + 'vercel.ai.response.finishReason': 'stop', + 'gen_ai.response.text': 'First span here!', + 'vercel.ai.settings.maxRetries': 2, + 'vercel.ai.streaming': false, + 'gen_ai.prompt': '{"prompt":"Where is the first span?"}', + 'gen_ai.response.model': 'mock-model-id', + 'gen_ai.usage.input_tokens': 10, + 'gen_ai.usage.output_tokens': 20, + 'gen_ai.usage.total_tokens': 30, + 'operation.name': 'ai.generateText', + 'sentry.op': 'gen_ai.invoke_agent', + 'sentry.origin': 'auto.vercelai.otel', + }, + description: 'generateText', + op: 'gen_ai.invoke_agent', + origin: 'auto.vercelai.otel', + status: 'ok', + }), + // Second span - doGenerate for first call, should also include input/output fields when sendDefaultPii: true + expect.objectContaining({ + data: { + 'vercel.ai.model.id': 'mock-model-id', + 'vercel.ai.model.provider': 'mock-provider', + 'vercel.ai.operationId': 'ai.generateText.doGenerate', + 'vercel.ai.pipeline.name': 'generateText.doGenerate', + 'gen_ai.request.messages': '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]', + 'vercel.ai.response.finishReason': 'stop', + 'vercel.ai.response.id': expect.any(String), + 'vercel.ai.response.model': 'mock-model-id', + 'gen_ai.response.text': 'First span here!', + 'vercel.ai.response.timestamp': expect.any(String), + 'vercel.ai.settings.maxRetries': 2, + 'vercel.ai.streaming': false, + 'gen_ai.request.model': 'mock-model-id', + 'gen_ai.response.finish_reasons': ['stop'], + 'gen_ai.response.id': expect.any(String), + 'gen_ai.response.model': 'mock-model-id', + 'gen_ai.system': 'mock-provider', + 'gen_ai.usage.input_tokens': 10, + 'gen_ai.usage.output_tokens': 20, + 'gen_ai.usage.total_tokens': 30, + 'operation.name': 'ai.generateText.doGenerate', + 'sentry.op': 'gen_ai.generate_text', + 'sentry.origin': 'auto.vercelai.otel', + }, + description: 'generate_text mock-model-id', + op: 'gen_ai.generate_text', + origin: 'auto.vercelai.otel', + status: 'ok', + }), + // Third span - explicitly enabled telemetry, should 
record inputs/outputs regardless of sendDefaultPii + expect.objectContaining({ + data: { + 'vercel.ai.model.id': 'mock-model-id', + 'vercel.ai.model.provider': 'mock-provider', + 'vercel.ai.operationId': 'ai.generateText', + 'vercel.ai.pipeline.name': 'generateText', + 'vercel.ai.prompt': '{"prompt":"Where is the second span?"}', + 'vercel.ai.response.finishReason': 'stop', + 'gen_ai.response.text': expect.any(String), + 'vercel.ai.settings.maxRetries': 2, + 'vercel.ai.streaming': false, + 'gen_ai.prompt': '{"prompt":"Where is the second span?"}', + 'gen_ai.response.model': 'mock-model-id', + 'gen_ai.usage.input_tokens': 10, + 'gen_ai.usage.output_tokens': 20, + 'gen_ai.usage.total_tokens': 30, + 'operation.name': 'ai.generateText', + 'sentry.op': 'gen_ai.invoke_agent', + 'sentry.origin': 'auto.vercelai.otel', + }, + description: 'generateText', + op: 'gen_ai.invoke_agent', + origin: 'auto.vercelai.otel', + status: 'ok', + }), + // Fourth span - doGenerate for explicitly enabled telemetry call + expect.objectContaining({ + data: { + 'sentry.origin': 'auto.vercelai.otel', + 'sentry.op': 'gen_ai.generate_text', + 'operation.name': 'ai.generateText.doGenerate', + 'vercel.ai.operationId': 'ai.generateText.doGenerate', + 'vercel.ai.model.provider': 'mock-provider', + 'vercel.ai.model.id': 'mock-model-id', + 'vercel.ai.settings.maxRetries': 2, + 'gen_ai.system': 'mock-provider', + 'gen_ai.request.model': 'mock-model-id', + 'vercel.ai.pipeline.name': 'generateText.doGenerate', + 'vercel.ai.streaming': false, + 'vercel.ai.response.finishReason': 'stop', + 'vercel.ai.response.model': 'mock-model-id', + 'vercel.ai.response.id': expect.any(String), + 'gen_ai.response.text': expect.any(String), + 'vercel.ai.response.timestamp': expect.any(String), + 'gen_ai.request.messages': expect.any(String), + 'gen_ai.response.finish_reasons': ['stop'], + 'gen_ai.usage.input_tokens': 10, + 'gen_ai.usage.output_tokens': 20, + 'gen_ai.response.id': expect.any(String), + 'gen_ai.response.model': 'mock-model-id', + 'gen_ai.usage.total_tokens': 30, + }, + description: 'generate_text mock-model-id', + op: 'gen_ai.generate_text', + origin: 'auto.vercelai.otel', + status: 'ok', + }), + // Fifth span - tool call generateText span (should include prompts when sendDefaultPii: true) + expect.objectContaining({ + data: { + 'vercel.ai.model.id': 'mock-model-id', + 'vercel.ai.model.provider': 'mock-provider', + 'vercel.ai.operationId': 'ai.generateText', + 'vercel.ai.pipeline.name': 'generateText', + 'vercel.ai.prompt': '{"prompt":"What is the weather in San Francisco?"}', + 'vercel.ai.response.finishReason': 'tool-calls', + // 'gen_ai.response.text': 'Tool call completed!', + 'gen_ai.response.tool_calls': expect.any(String), + 'vercel.ai.settings.maxRetries': 2, + 'vercel.ai.streaming': false, + 'gen_ai.prompt': '{"prompt":"What is the weather in San Francisco?"}', + 'gen_ai.response.model': 'mock-model-id', + 'gen_ai.usage.input_tokens': 15, + 'gen_ai.usage.output_tokens': 25, + 'gen_ai.usage.total_tokens': 40, + 'operation.name': 'ai.generateText', + 'sentry.op': 'gen_ai.invoke_agent', + 'sentry.origin': 'auto.vercelai.otel', + }, + description: 'generateText', + op: 'gen_ai.invoke_agent', + origin: 'auto.vercelai.otel', + status: 'ok', + }), + // Sixth span - tool call doGenerate span (should include prompts when sendDefaultPii: true) + expect.objectContaining({ + data: { + 'vercel.ai.model.id': 'mock-model-id', + 'vercel.ai.model.provider': 'mock-provider', + 'vercel.ai.operationId': 'ai.generateText.doGenerate', + 
'vercel.ai.pipeline.name': 'generateText.doGenerate', + 'gen_ai.request.messages': expect.any(String), + 'vercel.ai.prompt.toolChoice': expect.any(String), + 'gen_ai.request.available_tools': expect.any(Array), + 'vercel.ai.response.finishReason': 'tool-calls', + 'vercel.ai.response.id': expect.any(String), + 'vercel.ai.response.model': 'mock-model-id', + // 'gen_ai.response.text': 'Tool call completed!', // TODO: look into why this is not being set + 'vercel.ai.response.timestamp': expect.any(String), + 'gen_ai.response.tool_calls': expect.any(String), + 'vercel.ai.settings.maxRetries': 2, + 'vercel.ai.streaming': false, + 'gen_ai.request.model': 'mock-model-id', + 'gen_ai.response.finish_reasons': ['tool-calls'], + 'gen_ai.response.id': expect.any(String), + 'gen_ai.response.model': 'mock-model-id', + 'gen_ai.system': 'mock-provider', + 'gen_ai.usage.input_tokens': 15, + 'gen_ai.usage.output_tokens': 25, + 'gen_ai.usage.total_tokens': 40, + 'operation.name': 'ai.generateText.doGenerate', + 'sentry.op': 'gen_ai.generate_text', + 'sentry.origin': 'auto.vercelai.otel', + }, + description: 'generate_text mock-model-id', + op: 'gen_ai.generate_text', + origin: 'auto.vercelai.otel', + status: 'ok', + }), + // Seventh span - tool call execution span + expect.objectContaining({ + data: { + 'vercel.ai.operationId': 'ai.toolCall', + 'gen_ai.tool.call.id': 'call-1', + 'gen_ai.tool.name': 'getWeather', + 'gen_ai.tool.input': expect.any(String), + 'gen_ai.tool.output': expect.any(String), + 'gen_ai.tool.type': 'function', + 'operation.name': 'ai.toolCall', + 'sentry.op': 'gen_ai.execute_tool', + 'sentry.origin': 'auto.vercelai.otel', + }, + description: 'execute_tool getWeather', + op: 'gen_ai.execute_tool', + origin: 'auto.vercelai.otel', + status: 'ok', + }), + ]), + }; + + createEsmAndCjsTests( + __dirname, + 'scenario.mjs', + 'instrument.mjs', + (createRunner, test) => { + test('creates ai related spans with sendDefaultPii: false', async () => { + await createRunner().expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_FALSE }).start().completed(); + }); + }, + { + additionalDependencies: { + ai: '^5.0.0', + }, + }, + ); + + createEsmAndCjsTests( + __dirname, + 'scenario.mjs', + 'instrument-with-pii.mjs', + (createRunner, test) => { + test('creates ai related spans with sendDefaultPii: true', async () => { + await createRunner().expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_TRUE }).start().completed(); + }); + }, + { + additionalDependencies: { + ai: '^5.0.0', + }, + }, + ); + + createEsmAndCjsTests( + __dirname, + 'scenario-error-in-tool.mjs', + 'instrument.mjs', + (createRunner, test) => { + test('captures error in tool', async () => { + const expectedTransaction = { + transaction: 'main', + spans: expect.arrayContaining([ + expect.objectContaining({ + data: { + 'vercel.ai.model.id': 'mock-model-id', + 'vercel.ai.model.provider': 'mock-provider', + 'vercel.ai.operationId': 'ai.generateText', + 'vercel.ai.pipeline.name': 'generateText', + 'vercel.ai.settings.maxRetries': 2, + 'vercel.ai.streaming': false, + 'gen_ai.response.model': 'mock-model-id', + 'gen_ai.usage.input_tokens': 15, + 'gen_ai.usage.output_tokens': 25, + 'gen_ai.usage.total_tokens': 40, + 'operation.name': 'ai.generateText', + 'sentry.op': 'gen_ai.invoke_agent', + 'sentry.origin': 'auto.vercelai.otel', + 'vercel.ai.response.finishReason': 'tool-calls', + }, + description: 'generateText', + op: 'gen_ai.invoke_agent', + origin: 'auto.vercelai.otel', + }), + expect.objectContaining({ + data: { + 'vercel.ai.model.id': 
'mock-model-id', + 'vercel.ai.model.provider': 'mock-provider', + 'vercel.ai.operationId': 'ai.generateText.doGenerate', + 'vercel.ai.pipeline.name': 'generateText.doGenerate', + 'vercel.ai.response.finishReason': 'tool-calls', + 'vercel.ai.response.id': expect.any(String), + 'vercel.ai.response.model': 'mock-model-id', + 'vercel.ai.response.timestamp': expect.any(String), + 'vercel.ai.settings.maxRetries': 2, + 'vercel.ai.streaming': false, + 'gen_ai.request.model': 'mock-model-id', + 'gen_ai.response.finish_reasons': ['tool-calls'], + 'gen_ai.response.id': expect.any(String), + 'gen_ai.response.model': 'mock-model-id', + 'gen_ai.system': 'mock-provider', + 'gen_ai.usage.input_tokens': 15, + 'gen_ai.usage.output_tokens': 25, + 'gen_ai.usage.total_tokens': 40, + 'operation.name': 'ai.generateText.doGenerate', + 'sentry.op': 'gen_ai.generate_text', + 'sentry.origin': 'auto.vercelai.otel', + }, + description: 'generate_text mock-model-id', + op: 'gen_ai.generate_text', + origin: 'auto.vercelai.otel', + status: 'ok', + }), + expect.objectContaining({ + data: { + 'vercel.ai.operationId': 'ai.toolCall', + 'gen_ai.tool.call.id': 'call-1', + 'gen_ai.tool.name': 'getWeather', + 'gen_ai.tool.type': 'function', + 'operation.name': 'ai.toolCall', + 'sentry.op': 'gen_ai.execute_tool', + 'sentry.origin': 'auto.vercelai.otel', + }, + description: 'execute_tool getWeather', + op: 'gen_ai.execute_tool', + origin: 'auto.vercelai.otel', + status: 'unknown_error', + }), + ]), + }; + + const expectedError = { + level: 'error', + tags: expect.objectContaining({ + 'vercel.ai.tool.name': 'getWeather', + 'vercel.ai.tool.callId': 'call-1', + }), + }; + + let transactionEvent: Event | undefined; + let errorEvent: Event | undefined; + + await createRunner() + .expect({ + transaction: transaction => { + transactionEvent = transaction; + }, + }) + .expect({ + event: event => { + errorEvent = event; + }, + }) + .start() + .completed(); + + // eslint-disable-next-line no-console + console.log('expectedError', expectedError); + + expect(transactionEvent).toBeDefined(); + expect(transactionEvent).toMatchObject(expectedTransaction); + + expect(errorEvent).toBeDefined(); + expect(errorEvent).toMatchObject(expectedError); + + // Trace id should be the same for the transaction and error event + expect(transactionEvent!.contexts!.trace!.trace_id).toBe(errorEvent!.contexts!.trace!.trace_id); + }); + }, + { + additionalDependencies: { + ai: '^5.0.0', + }, + }, + ); + + createEsmAndCjsTests( + __dirname, + 'scenario.mjs', + 'instrument.mjs', + (createRunner, test) => { + test('creates ai related spans with v5', async () => { + await createRunner().expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_FALSE }).start().completed(); + }); + }, + { + additionalDependencies: { + ai: '^5.0.0', + }, + }, + ); +}); diff --git a/packages/core/src/index.ts b/packages/core/src/index.ts index 0747258113a9..c23ad708db17 100644 --- a/packages/core/src/index.ts +++ b/packages/core/src/index.ts @@ -118,6 +118,7 @@ export type { ReportDialogOptions } from './report-dialog'; export { _INTERNAL_captureLog, _INTERNAL_flushLogsBuffer, _INTERNAL_captureSerializedLog } from './logs/exports'; export { consoleLoggingIntegration } from './logs/console-integration'; export { addVercelAiProcessors } from './utils/vercel-ai'; +export { getSpanForToolCallId, cleanupToolCallSpan } from './utils/vercel-ai/utils'; export { instrumentOpenAiClient } from './utils/openai'; export { OPENAI_INTEGRATION_NAME } from './utils/openai/constants'; export type { 
OpenAiClient, OpenAiOptions, InstrumentedMethod } from './utils/openai/types'; diff --git a/packages/core/src/utils/vercel-ai/constants.ts b/packages/core/src/utils/vercel-ai/constants.ts new file mode 100644 index 000000000000..fe307b03e7fb --- /dev/null +++ b/packages/core/src/utils/vercel-ai/constants.ts @@ -0,0 +1,5 @@ +import type { Span } from '../../types-hoist/span'; + +// Global Map to track tool call IDs to their corresponding spans +// This allows us to capture tool errors and link them to the correct span +export const toolCallSpanMap = new Map<string, Span>(); diff --git a/packages/core/src/utils/vercel-ai.ts b/packages/core/src/utils/vercel-ai/index.ts similarity index 85% rename from packages/core/src/utils/vercel-ai.ts rename to packages/core/src/utils/vercel-ai/index.ts index 4ef437a1b922..4b317fe653d6 100644 --- a/packages/core/src/utils/vercel-ai.ts +++ b/packages/core/src/utils/vercel-ai/index.ts @@ -1,8 +1,11 @@ -import type { Client } from '../client'; -import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '../semanticAttributes'; -import type { Event } from '../types-hoist/event'; -import type { Span, SpanAttributes, SpanAttributeValue, SpanJSON, SpanOrigin } from '../types-hoist/span'; -import { spanToJSON } from './spanUtils'; +import type { Client } from '../../client'; +import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '../../semanticAttributes'; +import type { Event } from '../../types-hoist/event'; +import type { Span, SpanAttributes, SpanAttributeValue, SpanJSON, SpanOrigin } from '../../types-hoist/span'; +import { spanToJSON } from '../spanUtils'; +import { toolCallSpanMap } from './constants'; +import type { TokenSummary } from './types'; +import { accumulateTokensForParent, applyAccumulatedTokens } from './utils'; import type { ProviderMetadata } from './vercel-ai-attributes'; import { AI_MODEL_ID_ATTRIBUTE, @@ -60,11 +63,6 @@ function onVercelAiSpanStart(span: Span): void { processGenerateSpan(span, name, attributes); } -interface TokenSummary { - inputTokens: number; - outputTokens: number; -} - function vercelAiEventProcessor(event: Event): Event { if (event.type === 'transaction' && event.spans) { // Map to accumulate token data by parent span ID @@ -86,6 +84,12 @@ function vercelAiEventProcessor(event: Event): Event { applyAccumulatedTokens(span, tokenAccumulator); } + + // Also apply to the root span when it is the invoke_agent pipeline + const trace = event.contexts?.trace; + if (trace && trace.op === 'gen_ai.invoke_agent') { + applyAccumulatedTokens(trace, tokenAccumulator); + } } return event; @@ -148,6 +152,15 @@ function processToolCallSpan(span: Span, attributes: SpanAttributes): void { span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.execute_tool'); renameAttributeKey(attributes, AI_TOOL_CALL_NAME_ATTRIBUTE, 'gen_ai.tool.name'); renameAttributeKey(attributes, AI_TOOL_CALL_ID_ATTRIBUTE, 'gen_ai.tool.call.id'); + + // Store the span in our global map using the tool call ID + // This allows us to capture tool errors and link them to the correct span + const toolCallId = attributes['gen_ai.tool.call.id']; + + if (typeof toolCallId === 'string') { + toolCallSpanMap.set(toolCallId, span); + } + // https://opentelemetry.io/docs/specs/semconv/registry/attributes/gen-ai/#gen-ai-tool-type if (!attributes['gen_ai.tool.type']) { span.setAttribute('gen_ai.tool.type', 'function'); @@ -262,56 +275,6 @@ export function addVercelAiProcessors(client: Client): void { 
client.addEventProcessor(Object.assign(vercelAiEventProcessor, { id: 'VercelAiEventProcessor' })); } -/** - * Accumulates token data from a span to its parent in the token accumulator map. - * This function extracts token usage from the current span and adds it to the - * accumulated totals for its parent span. - */ -function accumulateTokensForParent(span: SpanJSON, tokenAccumulator: Map<string, TokenSummary>): void { - const parentSpanId = span.parent_span_id; - if (!parentSpanId) { - return; - } - - const inputTokens = span.data[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]; - const outputTokens = span.data[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]; - - if (typeof inputTokens === 'number' || typeof outputTokens === 'number') { - const existing = tokenAccumulator.get(parentSpanId) || { inputTokens: 0, outputTokens: 0 }; - - if (typeof inputTokens === 'number') { - existing.inputTokens += inputTokens; - } - if (typeof outputTokens === 'number') { - existing.outputTokens += outputTokens; - } - - tokenAccumulator.set(parentSpanId, existing); - } -} - -/** - * Applies accumulated token data to the `gen_ai.invoke_agent` span. - * Only immediate children of the `gen_ai.invoke_agent` span are considered, - * since aggregation will automatically occur for each parent span. - */ -function applyAccumulatedTokens(span: SpanJSON, tokenAccumulator: Map<string, TokenSummary>): void { - const accumulated = tokenAccumulator.get(span.span_id); - if (!accumulated) { - return; - } - - if (accumulated.inputTokens > 0) { - span.data[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] = accumulated.inputTokens; - } - if (accumulated.outputTokens > 0) { - span.data[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] = accumulated.outputTokens; - } - if (accumulated.inputTokens > 0 || accumulated.outputTokens > 0) { - span.data['gen_ai.usage.total_tokens'] = accumulated.inputTokens + accumulated.outputTokens; - } -} - function addProviderMetadataToAttributes(attributes: SpanAttributes): void { const providerMetadata = attributes[AI_RESPONSE_PROVIDER_METADATA_ATTRIBUTE] as string | undefined; if (providerMetadata) { diff --git a/packages/core/src/utils/vercel-ai/types.ts b/packages/core/src/utils/vercel-ai/types.ts new file mode 100644 index 000000000000..03f22c415001 --- /dev/null +++ b/packages/core/src/utils/vercel-ai/types.ts @@ -0,0 +1,4 @@ +export interface TokenSummary { + inputTokens: number; + outputTokens: number; +} diff --git a/packages/core/src/utils/vercel-ai/utils.ts b/packages/core/src/utils/vercel-ai/utils.ts new file mode 100644 index 000000000000..cb85543c3376 --- /dev/null +++ b/packages/core/src/utils/vercel-ai/utils.ts @@ -0,0 +1,72 @@ +import type { TraceContext } from '../../types-hoist/context'; +import type { Span, SpanJSON } from '../../types-hoist/span'; +import { GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE } from '../gen-ai-attributes'; +import { toolCallSpanMap } from './constants'; +import type { TokenSummary } from './types'; + +/** + * Accumulates token data from a span to its parent in the token accumulator map. + * This function extracts token usage from the current span and adds it to the + * accumulated totals for its parent span. 
+ */ +export function accumulateTokensForParent(span: SpanJSON, tokenAccumulator: Map<string, TokenSummary>): void { + const parentSpanId = span.parent_span_id; + if (!parentSpanId) { + return; + } + + const inputTokens = span.data[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]; + const outputTokens = span.data[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]; + + if (typeof inputTokens === 'number' || typeof outputTokens === 'number') { + const existing = tokenAccumulator.get(parentSpanId) || { inputTokens: 0, outputTokens: 0 }; + + if (typeof inputTokens === 'number') { + existing.inputTokens += inputTokens; + } + if (typeof outputTokens === 'number') { + existing.outputTokens += outputTokens; + } + + tokenAccumulator.set(parentSpanId, existing); + } +} + +/** + * Applies accumulated token data to the `gen_ai.invoke_agent` span. + * Only immediate children of the `gen_ai.invoke_agent` span are considered, + * since aggregation will automatically occur for each parent span. + */ +export function applyAccumulatedTokens( + spanOrTrace: SpanJSON | TraceContext, + tokenAccumulator: Map<string, TokenSummary>, +): void { + const accumulated = tokenAccumulator.get(spanOrTrace.span_id); + if (!accumulated || !spanOrTrace.data) { + return; + } + + if (accumulated.inputTokens > 0) { + spanOrTrace.data[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] = accumulated.inputTokens; + } + if (accumulated.outputTokens > 0) { + spanOrTrace.data[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] = accumulated.outputTokens; + } + if (accumulated.inputTokens > 0 || accumulated.outputTokens > 0) { + spanOrTrace.data['gen_ai.usage.total_tokens'] = accumulated.inputTokens + accumulated.outputTokens; + } +} + +/** + * Get the span associated with a tool call ID + */ +export function getSpanForToolCallId(toolCallId: string): Span | undefined { + return toolCallSpanMap.get(toolCallId); +} + +/** + * Clean up the span mapping for a tool call ID + */ +export function cleanupToolCallSpan(toolCallId: string): void { + toolCallSpanMap.delete(toolCallId); +} diff --git a/packages/core/src/utils/vercel-ai-attributes.ts b/packages/core/src/utils/vercel-ai/vercel-ai-attributes.ts similarity index 100% rename from packages/core/src/utils/vercel-ai-attributes.ts rename to packages/core/src/utils/vercel-ai/vercel-ai-attributes.ts diff --git a/packages/node/src/integrations/tracing/vercelai/instrumentation.ts b/packages/node/src/integrations/tracing/vercelai/instrumentation.ts index 22ec18a682f0..0b70dc89c3fb 100644 --- a/packages/node/src/integrations/tracing/vercelai/instrumentation.ts +++ b/packages/node/src/integrations/tracing/vercelai/instrumentation.ts @@ -1,11 +1,16 @@ import type { InstrumentationConfig, InstrumentationModuleDefinition } from '@opentelemetry/instrumentation'; import { InstrumentationBase, InstrumentationNodeModuleDefinition } from '@opentelemetry/instrumentation'; +import type { Span } from '@sentry/core'; import { addNonEnumerableProperty, + captureException, + cleanupToolCallSpan, getActiveSpan, getCurrentScope, + getSpanForToolCallId, handleCallbackErrors, SDK_VERSION, + withScope, } from '@sentry/core'; import { INTEGRATION_NAME } from './constants'; import type { TelemetrySettings, VercelAiIntegration } from './types'; @@ -35,6 +40,101 @@ interface RecordingOptions { recordOutputs?: boolean; } +interface ToolError { + type: 'tool-error' | 'tool-result' | 'tool-call'; + toolCallId: string; + toolName: string; + input?: { + [key: string]: unknown; + }; + error: Error; + dynamic?: boolean; +} + +function isToolError(obj: unknown): obj is ToolError { + if (typeof obj !== 'object' || obj === 
null) { + return false; + } + + const candidate = obj as Record<string, unknown>; + return ( + 'type' in candidate && + 'error' in candidate && + 'toolName' in candidate && + 'toolCallId' in candidate && + candidate.type === 'tool-error' && + candidate.error instanceof Error + ); +} + +/** + * Check for tool errors in the result and capture them. + * Tool errors are not rejected in the Vercel AI SDK v5; they are added as metadata to the result content. + */ +function checkResultForToolErrors(result: unknown | Promise<unknown>): void { + if (typeof result !== 'object' || result === null || !('content' in result)) { + return; + } + + const resultObj = result as { content: Array<unknown> }; + if (!Array.isArray(resultObj.content)) { + return; + } + + for (const item of resultObj.content) { + if (isToolError(item)) { + // Try to get the span associated with this tool call ID + const associatedSpan = getSpanForToolCallId(item.toolCallId) as Span; + + if (associatedSpan) { + // We have the span, so link the error using span and trace IDs from the span + const spanContext = associatedSpan.spanContext(); + + withScope(scope => { + // Set the span and trace context for proper linking + scope.setContext('trace', { + trace_id: spanContext.traceId, + span_id: spanContext.spanId, + }); + + // Add tool-specific tags + scope.setTag('vercel.ai.tool.name', item.toolName); + scope.setTag('vercel.ai.tool.callId', item.toolCallId); + scope.setTag('vercel.ai.tool.span.id', spanContext.spanId); + scope.setTag('vercel.ai.tool.trace.id', spanContext.traceId); + + scope.setLevel('error'); + + captureException(item.error, { + mechanism: { + type: 'auto.vercelai.otel', + handled: false, + }, + }); + }); + + // Clean up the span mapping since we've processed this tool error + // We won't get multiple { type: 'tool-error' } parts for the same toolCallId. + cleanupToolCallSpan(item.toolCallId); + } else { + // Fallback: capture without span linking + withScope(scope => { + scope.setTag('vercel.ai.tool.name', item.toolName); + scope.setTag('vercel.ai.tool.callId', item.toolCallId); + scope.setLevel('error'); + + captureException(item.error, { + mechanism: { + type: 'auto.vercelai.otel', + handled: false, + }, + }); + }); + } + } + } +} + /** * Determines whether to record inputs and outputs for Vercel AI telemetry based on the configuration hierarchy. * @@ -89,7 +189,7 @@ export class SentryVercelAiInstrumentation extends InstrumentationBase { * Initializes the instrumentation by defining the modules to be patched. */ public init(): InstrumentationModuleDefinition { - const module = new InstrumentationNodeModuleDefinition('ai', ['>=3.0.0 <5'], this._patch.bind(this)); + const module = new InstrumentationNodeModuleDefinition('ai', ['>=3.0.0 <6'], this._patch.bind(this)); return module; } @@ -139,9 +239,14 @@ export class SentryVercelAiInstrumentation extends InstrumentationBase { }; return handleCallbackErrors( - () => { + async () => { // @ts-expect-error we know that the method exists - return originalMethod.apply(this, args); + const result = await originalMethod.apply(this, args); + + // Tool errors are not rejected in the Vercel AI SDK v5; they are added as metadata to the result content + checkResultForToolErrors(result); + + return result; }, error => { // This error bubbles up to unhandledrejection handler (if not handled before),
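
For context, a minimal TypeScript sketch of the result shape that checkResultForToolErrors scans in v5. The object literal below is hypothetical: it mirrors only the fields that the isToolError guard checks and the content array the function iterates, not the full v5 generateText result type.

// Hypothetical v5 result: a failing tool does not reject the generateText
// promise; the failure surfaces as a 'tool-error' content part instead.
const result: unknown = {
  content: [
    {
      type: 'tool-error',
      toolCallId: 'call-1',
      toolName: 'getWeather',
      input: { location: 'San Francisco' },
      error: new Error('Error in tool'),
    },
  ],
};

// checkResultForToolErrors(result) finds the 'tool-error' part, looks up the
// span that processToolCallSpan stored under 'call-1' in toolCallSpanMap,
// captures the error with that span's trace and span IDs via withScope, and
// then calls cleanupToolCallSpan('call-1') so the map entry does not leak.
checkResultForToolErrors(result);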