Commit dbe6520

feat(Tracing): Add Vercel AI SDK v6 support (#18741)
This PR adds support for Vercel AI SDK v6 telemetry changes.

### Changes

**Provider Metadata Updates**

- Added `azure` key to the `ProviderMetadata` interface for the Azure Responses API (v6 uses `azure` instead of `openai` for the Azure provider)
- Added `vertex` key for the Google Vertex provider (v6 uses `vertex` instead of `google`)
- Updated `addProviderMetadataToAttributes` to check both old and new keys for backward compatibility (see the sketch below)

**V6 Test Suite**

- Added integration tests for Vercel AI SDK v6 using `MockLanguageModelV3`
- Updated mock scenarios to match v6's new data structures:
  - `usage` now uses object format: `{ total, noCache, cached }`
  - `finishReason` now uses object format: `{ unified, raw }`
- Added `vercel.ai.request.headers.user-agent` attribute to test expectations

Closes #18691
1 parent fae3a77 commit dbe6520
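
The backward-compatible key lookup described above might look roughly like the following sketch. The helper name `addProviderMetadataToAttributes` comes from the PR description, but the attribute keys and metadata field shapes here are illustrative assumptions, not the SDK's actual ones.

```ts
// Illustrative sketch only: the attribute names and the metadata shape are
// assumptions for the example, not the SDK's real definitions.
type ProviderMetadata = Record<string, Record<string, unknown> | undefined>;

function addProviderMetadataToAttributes(
  attributes: Record<string, unknown>,
  providerMetadata: ProviderMetadata | undefined,
): void {
  if (!providerMetadata) return;

  // v6 renamed the provider keys, so check the new key first and fall back to
  // the old one: `azure` (v6, Azure Responses API) vs `openai` (v5), and
  // `vertex` (v6, Google Vertex) vs `google` (v5).
  const openaiMetadata = providerMetadata.azure ?? providerMetadata.openai;
  const googleMetadata = providerMetadata.vertex ?? providerMetadata.google;

  if (openaiMetadata) {
    attributes['vercel.ai.openai.metadata'] = JSON.stringify(openaiMetadata);
  }
  if (googleMetadata) {
    attributes['vercel.ai.google.metadata'] = JSON.stringify(googleMetadata);
  }
}
```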

File tree: 8 files changed, +748 −13 lines
Lines changed: 11 additions & 0 deletions

```js
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

Sentry.init({
  dsn: 'https://[email protected]/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: true,
  transport: loggingTransport,
  integrations: [Sentry.vercelAIIntegration()],
});
```
Lines changed: 10 additions & 0 deletions

```js
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

Sentry.init({
  dsn: 'https://[email protected]/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  transport: loggingTransport,
  integrations: [Sentry.vercelAIIntegration()],
});
```
Lines changed: 41 additions & 0 deletions

```js
import * as Sentry from '@sentry/node';
import { generateText, tool } from 'ai';
import { MockLanguageModelV3 } from 'ai/test';
import { z } from 'zod';

async function run() {
  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
    await generateText({
      model: new MockLanguageModelV3({
        doGenerate: async () => ({
          finishReason: { unified: 'tool-calls', raw: 'tool_calls' },
          usage: {
            inputTokens: { total: 15, noCache: 15, cached: 0 },
            outputTokens: { total: 25, noCache: 25, cached: 0 },
            totalTokens: { total: 40, noCache: 40, cached: 0 },
          },
          content: [
            {
              type: 'tool-call',
              toolCallId: 'call-1',
              toolName: 'getWeather',
              input: JSON.stringify({ location: 'San Francisco' }),
            },
          ],
          warnings: [],
        }),
      }),
      tools: {
        getWeather: tool({
          inputSchema: z.object({ location: z.string() }),
          execute: async () => {
            throw new Error('Error in tool');
          },
        }),
      },
      prompt: 'What is the weather in San Francisco?',
    });
  });
}

run();
```
Lines changed: 92 additions & 0 deletions

```js
import * as Sentry from '@sentry/node';
import { generateText, tool } from 'ai';
import { MockLanguageModelV3 } from 'ai/test';
import { z } from 'zod';

async function run() {
  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
    await generateText({
      model: new MockLanguageModelV3({
        doGenerate: async () => ({
          finishReason: { unified: 'stop', raw: 'stop' },
          usage: {
            inputTokens: { total: 10, noCache: 10, cached: 0 },
            outputTokens: { total: 20, noCache: 20, cached: 0 },
            totalTokens: { total: 30, noCache: 30, cached: 0 },
          },
          content: [{ type: 'text', text: 'First span here!' }],
          warnings: [],
        }),
      }),
      prompt: 'Where is the first span?',
    });

    // This span should have input and output prompts attached because telemetry is explicitly enabled.
    await generateText({
      experimental_telemetry: { isEnabled: true },
      model: new MockLanguageModelV3({
        doGenerate: async () => ({
          finishReason: { unified: 'stop', raw: 'stop' },
          usage: {
            inputTokens: { total: 10, noCache: 10, cached: 0 },
            outputTokens: { total: 20, noCache: 20, cached: 0 },
            totalTokens: { total: 30, noCache: 30, cached: 0 },
          },
          content: [{ type: 'text', text: 'Second span here!' }],
          warnings: [],
        }),
      }),
      prompt: 'Where is the second span?',
    });

    // This span should include tool calls and tool results
    await generateText({
      model: new MockLanguageModelV3({
        doGenerate: async () => ({
          finishReason: { unified: 'tool-calls', raw: 'tool_calls' },
          usage: {
            inputTokens: { total: 15, noCache: 15, cached: 0 },
            outputTokens: { total: 25, noCache: 25, cached: 0 },
            totalTokens: { total: 40, noCache: 40, cached: 0 },
          },
          content: [
            {
              type: 'tool-call',
              toolCallId: 'call-1',
              toolName: 'getWeather',
              input: JSON.stringify({ location: 'San Francisco' }),
            },
          ],
          warnings: [],
        }),
      }),
      tools: {
        getWeather: tool({
          inputSchema: z.object({ location: z.string() }),
          execute: async ({ location }) => `Weather in ${location}: Sunny, 72°F`,
        }),
      },
      prompt: 'What is the weather in San Francisco?',
    });

    // This span should not be captured because we've disabled telemetry
    await generateText({
      experimental_telemetry: { isEnabled: false },
      model: new MockLanguageModelV3({
        doGenerate: async () => ({
          finishReason: { unified: 'stop', raw: 'stop' },
          usage: {
            inputTokens: { total: 10, noCache: 10, cached: 0 },
            outputTokens: { total: 20, noCache: 20, cached: 0 },
            totalTokens: { total: 30, noCache: 30, cached: 0 },
          },
          content: [{ type: 'text', text: 'Third span here!' }],
          warnings: [],
        }),
      }),
      prompt: 'Where is the third span?',
    });
  });
}

run();
```
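
For reference, the v5 → v6 shape changes these mock scenarios exercise can be flattened with a small normalizer like the following hypothetical sketch. The function names are made up for illustration; this is not code from the SDK or the AI SDK.

```ts
// Hypothetical normalizer, not the SDK's actual code: v5 reports token counts
// as plain numbers, v6 as `{ total, noCache, cached }` objects.
type V6TokenUsage = { total: number; noCache: number; cached: number };

function tokenTotal(count: number | V6TokenUsage | undefined): number | undefined {
  if (count === undefined) return undefined;
  return typeof count === 'number' ? count : count.total;
}

// Likewise, v5's string finishReason becomes `{ unified, raw }` in v6.
function unifiedFinishReason(reason: string | { unified: string; raw?: unknown }): string {
  return typeof reason === 'string' ? reason : reason.unified;
}

// Both shapes normalize to the same flat values:
console.log(tokenTotal(15)); // 15 (v5)
console.log(tokenTotal({ total: 15, noCache: 15, cached: 0 })); // 15 (v6)
console.log(unifiedFinishReason('stop')); // 'stop' (v5)
console.log(unifiedFinishReason({ unified: 'stop', raw: 'stop' })); // 'stop' (v6)
```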
