diff --git a/dev-packages/e2e-tests/test-applications/nextjs-15/AI_INTEGRATION_SUMMARY.md b/dev-packages/e2e-tests/test-applications/nextjs-15/AI_INTEGRATION_SUMMARY.md
new file mode 100644
index 000000000000..7db94d9736ed
--- /dev/null
+++ b/dev-packages/e2e-tests/test-applications/nextjs-15/AI_INTEGRATION_SUMMARY.md
@@ -0,0 +1,59 @@
+# Vercel AI Integration - Next.js 15 E2E Test Implementation
+
+## Overview
+This document summarizes the implementation of the Vercel AI integration for the Next.js 15 E2E test application.
+
+## Changes Made
+
+### 1. Updated Dependencies (package.json)
+Added the following dependencies:
+- `ai`: ^3.0.0 - Vercel AI SDK
+- `zod`: ^3.22.4 - For tool parameter schemas
+
+### 2. Server Configuration (sentry.server.config.ts)
+Added the Vercel AI integration to the Sentry initialization:
+```typescript
+integrations: [
+  Sentry.vercelAIIntegration(),
+],
+```
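+
+The config file itself is not included in this diff; a minimal sketch of how the integration slots into the existing `Sentry.init` call (the DSN variable and sample rate below are placeholders, not the app's actual values) looks like:
+```typescript
+import * as Sentry from '@sentry/nextjs';
+
+Sentry.init({
+  dsn: process.env.E2E_TEST_DSN, // placeholder - the e2e app reads its DSN from the test environment
+  tracesSampleRate: 1.0, // placeholder sample rate
+  // Needed so the integration records prompts/responses by default (see "Expected Behavior" below)
+  sendDefaultPii: true,
+  integrations: [Sentry.vercelAIIntegration()],
+});
+```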
+
+### 3. Test Page (app/ai-test/page.tsx)
+Created a new test page that demonstrates various AI SDK features:
+- Basic text generation with automatic telemetry
+- Explicit telemetry configuration
+- Tool calls and execution
+- Disabled telemetry
+
+The page wraps AI operations in a Sentry span for proper tracing.
+
+### 4. Test Suite (tests/ai-test.test.ts)
+Created a Playwright test that verifies:
+- AI spans are created with the correct operations (`ai.pipeline.generate_text`, `gen_ai.generate_text`, `gen_ai.execute_tool`)
+- Span attributes match expected values (model info, tokens, prompts, etc.)
+- Input/output recording respects the `sendDefaultPii: true` setting
+- Tool calls are properly traced
+- Disabled telemetry prevents span creation
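+
+The test file is not part of this diff; a minimal sketch of the kind of check it performs, assuming the shared `waitForTransaction` helper from `@sentry-internal/test-utils` used by the other e2e suites (the transaction name and attribute key below are illustrative assumptions, not verified values):
+```typescript
+import { expect, test } from '@playwright/test';
+import { waitForTransaction } from '@sentry-internal/test-utils';
+
+test('captures spans for Vercel AI calls', async ({ page }) => {
+  // Wait for the server transaction that should contain the AI spans.
+  const transactionPromise = waitForTransaction('nextjs-15', transactionEvent => {
+    return transactionEvent.transaction === 'GET /ai-test';
+  });
+
+  await page.goto('/ai-test');
+  const transaction = await transactionPromise;
+
+  // Calls with telemetry enabled should produce gen_ai.* child spans...
+  const aiSpans = (transaction.spans ?? []).filter(span => span.op?.startsWith('gen_ai.'));
+  expect(aiSpans.length).toBeGreaterThan(0);
+
+  // ...with token usage attached (the attribute key shown here is indicative).
+  expect(aiSpans[0]?.data?.['gen_ai.usage.output_tokens']).toBeDefined();
+});
+```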
+
+## Expected Behavior
+
+When `sendDefaultPii: true` (as configured in this test app):
+1. AI operations automatically enable telemetry
+2. Input prompts and output responses are recorded in spans
+3. Tool calls include arguments and results
+4. Token usage is tracked
+
+## Running the Tests
+
+Steps:
+1. Build packages: `yarn build:tarball` (from repository root)
+2. Start the test registry (Verdaccio)
+3. Run the test: `yarn test:e2e nextjs-15` or `yarn test:run nextjs-15`
+
+## Instrumentation Notes
+
+The Vercel AI integration uses OpenTelemetry instrumentation to automatically patch the `ai` module methods. The instrumentation:
+- Enables telemetry by default for all AI operations
+- Respects the `sendDefaultPii` client option for recording inputs/outputs
+- Allows per-call telemetry configuration via `experimental_telemetry`
+- Follows a precedence hierarchy: integration options > method options > defaults
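+
+A hedged illustration of the per-call settings (the `experimental_telemetry` keys below are AI SDK options; treating integration-level `recordInputs`/`recordOutputs` options as higher-precedence is an assumption based on the hierarchy above):
+```typescript
+import { generateText } from 'ai';
+import { MockLanguageModelV1 } from 'ai/test';
+
+// Keep telemetry on for this call, but opt it out of input recording.
+// Options passed to Sentry.vercelAIIntegration() would take precedence over these
+// per-call values, which in turn override the SDK defaults.
+await generateText({
+  experimental_telemetry: { isEnabled: true, recordInputs: false, recordOutputs: true },
+  model: new MockLanguageModelV1({
+    doGenerate: async () => ({
+      rawCall: { rawPrompt: null, rawSettings: {} },
+      finishReason: 'stop',
+      usage: { promptTokens: 5, completionTokens: 5 },
+      text: 'ok',
+    }),
+  }),
+  prompt: 'This prompt will not be attached to the span.',
+});
+```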
diff --git a/dev-packages/e2e-tests/test-applications/nextjs-15/app/ai-test/page.tsx b/dev-packages/e2e-tests/test-applications/nextjs-15/app/ai-test/page.tsx
new file mode 100644
index 000000000000..828e92baf62a
--- /dev/null
+++ b/dev-packages/e2e-tests/test-applications/nextjs-15/app/ai-test/page.tsx
@@ -0,0 +1,101 @@
+import { generateText } from 'ai';
+import { MockLanguageModelV1 } from 'ai/test';
+import { z } from 'zod';
+import * as Sentry from '@sentry/nextjs';
+
+export const dynamic = 'force-dynamic';
+
+async function runAITest() {
+  // First span - no explicit telemetry config; telemetry is enabled automatically and inputs/outputs are recorded because sendDefaultPii: true
+  const result1 = await generateText({
+    model: new MockLanguageModelV1({
+      doGenerate: async () => ({
+        rawCall: { rawPrompt: null, rawSettings: {} },
+        finishReason: 'stop',
+        usage: { promptTokens: 10, completionTokens: 20 },
+        text: 'First span here!',
+      }),
+    }),
+    prompt: 'Where is the first span?',
+  });
+
+  // Second span - explicitly enabled telemetry, should record inputs/outputs
+  const result2 = await generateText({
+    experimental_telemetry: { isEnabled: true },
+    model: new MockLanguageModelV1({
+      doGenerate: async () => ({
+        rawCall: { rawPrompt: null, rawSettings: {} },
+        finishReason: 'stop',
+        usage: { promptTokens: 10, completionTokens: 20 },
+        text: 'Second span here!',
+      }),
+    }),
+    prompt: 'Where is the second span?',
+  });
+
+  // Third span - with tool calls and tool results
+  const result3 = await generateText({
+    model: new MockLanguageModelV1({
+      doGenerate: async () => ({
+        rawCall: { rawPrompt: null, rawSettings: {} },
+        finishReason: 'tool-calls',
+        usage: { promptTokens: 15, completionTokens: 25 },
+        text: 'Tool call completed!',
+        toolCalls: [
+          {
+            toolCallType: 'function',
+            toolCallId: 'call-1',
+            toolName: 'getWeather',
+            args: '{ "location": "San Francisco" }',
+          },
+        ],
+      }),
+    }),
+    tools: {
+      getWeather: {
+        parameters: z.object({ location: z.string() }),
+        execute: async (args: { location: string }) => {
+          return `Weather in ${args.location}: Sunny, 72°F`;
+        },
+      },
+    },
+    prompt: 'What is the weather in San Francisco?',
+  });
+
+  // Fourth span - explicitly disabled telemetry, should not be captured
+  const result4 = await generateText({
+    experimental_telemetry: { isEnabled: false },
+    model: new MockLanguageModelV1({
+      doGenerate: async () => ({
+        rawCall: { rawPrompt: null, rawSettings: {} },
+        finishReason: 'stop',
+        usage: { promptTokens: 10, completionTokens: 20 },
+        text: 'Third span here!',
+      }),
+    }),
+    prompt: 'Where is the third span?',
+  });
+
+  return {
+    result1: result1.text,
+    result2: result2.text,
+    result3: result3.text,
+    result4: result4.text,
+  };
+}
+
+export default async function Page() {
+  const results = await Sentry.startSpan(
+    { op: 'function', name: 'ai-test' },
+    async () => {
+      return await runAITest();
+    }
+  );
+
+  return (
+    <pre>{JSON.stringify(results, null, 2)}</pre>
+  );
+}