
Commit 1f8ed69

feat(cloudflare,vercel-edge): Add support for OpenAI instrumentation
To instrument the OpenAI client, wrap it with `Sentry.instrumentOpenAiClient`:

```js
import * as Sentry from '@sentry/cloudflare';
import OpenAI from 'openai';

const openai = new OpenAI();

const client = Sentry.instrumentOpenAiClient(openai, {
  recordInputs: true,
  recordOutputs: true,
});

// use the wrapped client
```
1 parent ce66380 commit 1f8ed69


8 files changed: +331 -0 lines changed


dev-packages/cloudflare-integration-tests/package.json

Lines changed: 1 addition & 0 deletions
```diff
@@ -16,6 +16,7 @@
     "@sentry/cloudflare": "10.1.0"
   },
   "devDependencies": {
+    "@sentry/core": "10.0.0",
     "@cloudflare/workers-types": "^4.20250708.0",
     "@sentry-internal/test-utils": "link:../test-utils",
     "vitest": "^3.2.4",
```
Lines changed: 34 additions & 0 deletions
```ts
import * as Sentry from '@sentry/cloudflare';
import { MockOpenAi } from './mocks';

interface Env {
  SENTRY_DSN: string;
}

const mockClient = new MockOpenAi({
  apiKey: 'mock-api-key',
});

const client = Sentry.instrumentOpenAiClient(mockClient);

export default Sentry.withSentry(
  (env: Env) => ({
    dsn: env.SENTRY_DSN,
    tracesSampleRate: 1.0,
  }),
  {
    async fetch(_request, _env, _ctx) {
      const response = await client.chat?.completions?.create({
        model: 'gpt-3.5-turbo',
        messages: [
          { role: 'system', content: 'You are a helpful assistant.' },
          { role: 'user', content: 'What is the capital of France?' },
        ],
        temperature: 0.7,
        max_tokens: 100,
      });

      return new Response(JSON.stringify(response));
    },
  },
);
```
Lines changed: 245 additions & 0 deletions
```ts
import type { OpenAiClient } from '@sentry/core';

export class MockOpenAi implements OpenAiClient {
  public chat?: Record<string, unknown> | undefined;
  public responses?: {
    create: (...args: unknown[]) => Promise<unknown>;
  };

  public apiKey: string;

  public constructor(config: { apiKey: string }) {
    this.apiKey = config.apiKey;

    this.chat = {
      completions: {
        create: async (...args: unknown[]) => {
          const params = args[0] as { model: string; stream?: boolean };
          // Simulate processing time
          await new Promise(resolve => setTimeout(resolve, 10));

          if (params.model === 'error-model') {
            const error = new Error('Model not found');
            (error as unknown as { status: number }).status = 404;
            (error as unknown as { headers: Record<string, string> }).headers = { 'x-request-id': 'mock-request-123' };
            throw error;
          }

          // If stream is requested, return an async generator
          if (params.stream) {
            return this.createChatCompletionStream(params);
          }

          return {
            id: 'chatcmpl-mock123',
            object: 'chat.completion',
            created: 1677652288,
            model: params.model,
            system_fingerprint: 'fp_44709d6fcb',
            choices: [
              {
                index: 0,
                message: {
                  role: 'assistant',
                  content: 'Hello from OpenAI mock!',
                },
                finish_reason: 'stop',
              },
            ],
            usage: {
              prompt_tokens: 10,
              completion_tokens: 15,
              total_tokens: 25,
            },
          };
        },
      },
    };

    this.responses = {
      create: async (...args: unknown[]) => {
        const params = args[0] as { model: string; input: string; instructions: string; stream?: boolean };
        await new Promise(resolve => setTimeout(resolve, 10));

        // If stream is requested, return an async generator
        if (params.stream) {
          return this.createResponsesApiStream(params);
        }

        return {
          id: 'resp_mock456',
          object: 'response',
          created_at: 1677652290,
          model: params.model,
          input_text: params.input,
          output_text: `Response to: ${params.input}`,
          status: 'completed',
          usage: {
            input_tokens: 5,
            output_tokens: 8,
            total_tokens: 13,
          },
        };
      },
    };
  }

  // Create a mock streaming response for chat completions
  public async *createChatCompletionStream(params: { model: string }): AsyncGenerator<unknown> {
    // First chunk with basic info
    yield {
      id: 'chatcmpl-stream-123',
      object: 'chat.completion.chunk',
      created: 1677652300,
      model: params.model,
      system_fingerprint: 'fp_stream_123',
      choices: [
        {
          index: 0,
          delta: {
            role: 'assistant',
            content: 'Hello',
          },
          finish_reason: null,
        },
      ],
    };

    // Second chunk with more content
    yield {
      id: 'chatcmpl-stream-123',
      object: 'chat.completion.chunk',
      created: 1677652300,
      model: params.model,
      system_fingerprint: 'fp_stream_123',
      choices: [
        {
          index: 0,
          delta: {
            content: ' from OpenAI streaming!',
          },
          finish_reason: 'stop',
        },
      ],
      usage: {
        prompt_tokens: 12,
        completion_tokens: 18,
        total_tokens: 30,
        completion_tokens_details: {
          accepted_prediction_tokens: 0,
          audio_tokens: 0,
          reasoning_tokens: 0,
          rejected_prediction_tokens: 0,
        },
        prompt_tokens_details: {
          audio_tokens: 0,
          cached_tokens: 0,
        },
      },
    };
  }

  // Create a mock streaming response for responses API
  public async *createResponsesApiStream(params: {
    model: string;
    input: string;
    instructions: string;
  }): AsyncGenerator<unknown> {
    // Response created event
    yield {
      type: 'response.created',
      response: {
        id: 'resp_stream_456',
        object: 'response',
        created_at: 1677652310,
        model: params.model,
        status: 'in_progress',
        error: null,
        incomplete_details: null,
        instructions: params.instructions,
        max_output_tokens: 1000,
        parallel_tool_calls: false,
        previous_response_id: null,
        reasoning: {
          effort: null,
          summary: null,
        },
        store: false,
        temperature: 0.7,
        text: {
          format: {
            type: 'text',
          },
        },
        tool_choice: 'auto',
        top_p: 1.0,
        truncation: 'disabled',
        user: null,
        metadata: {},
        output: [],
        output_text: '',
        usage: {
          input_tokens: 0,
          output_tokens: 0,
          total_tokens: 0,
        },
      },
      sequence_number: 1,
    };

    // Response in progress with output text delta
    yield {
      type: 'response.output_text.delta',
      delta: 'Streaming response to: ',
      sequence_number: 2,
    };

    yield {
      type: 'response.output_text.delta',
      delta: params.input,
      sequence_number: 3,
    };

    // Response completed event
    yield {
      type: 'response.completed',
      response: {
        id: 'resp_stream_456',
        object: 'response',
        created_at: 1677652310,
        model: params.model,
        status: 'completed',
        error: null,
        incomplete_details: null,
        instructions: params.instructions,
        max_output_tokens: 1000,
        parallel_tool_calls: false,
        previous_response_id: null,
        reasoning: {
          effort: null,
          summary: null,
        },
        store: false,
        temperature: 0.7,
        text: {
          format: {
            type: 'text',
          },
        },
        tool_choice: 'auto',
        top_p: 1.0,
        truncation: 'disabled',
        user: null,
        metadata: {},
        output: [],
        output_text: params.input,
        usage: {
          input_tokens: 6,
          output_tokens: 10,
          total_tokens: 16,
        },
      },
      sequence_number: 4,
    };
  }
}
```
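The mock also returns an async generator when `stream: true` is passed, so the streaming code paths can be exercised the same way. Below is a minimal, illustrative sketch (not part of this commit) of collecting a streamed chat completion through the instrumented client, assuming the wrapper hands the mock's async generator back unchanged:

```ts
import * as Sentry from '@sentry/cloudflare';
import { MockOpenAi } from './mocks';

// Illustrative sketch (not part of this commit): the mock yields chunks from an
// async generator when `stream: true`, so the streamed output can be collected
// with `for await`, assuming the wrapper passes the iterable through unchanged.
const client = Sentry.instrumentOpenAiClient(new MockOpenAi({ apiKey: 'mock-api-key' }));

async function collectStreamedText(): Promise<string> {
  const stream = (await client.chat?.completions?.create({
    model: 'gpt-3.5-turbo',
    messages: [{ role: 'user', content: 'Say hello' }],
    stream: true,
  })) as unknown as AsyncIterable<{ choices?: Array<{ delta?: { content?: string } }> }>;

  let text = '';
  for await (const chunk of stream) {
    // Concatenate the partial deltas emitted by createChatCompletionStream above
    text += chunk.choices?.[0]?.delta?.content ?? '';
  }
  return text;
}
```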
Lines changed: 37 additions & 0 deletions
```ts
import { expect, it } from 'vitest';
import { createRunner } from '../../../runner';

it('traces a basic chat completion request', async () => {
  const runner = createRunner(__dirname)
    .ignore('event')
    .expect(envelope => {
      const transactionEvent = envelope[1]?.[0]?.[1];

      expect(transactionEvent.transaction).toBe('GET /');
      expect(transactionEvent.spans).toEqual(
        expect.arrayContaining([
          expect.objectContaining({
            data: expect.objectContaining({
              'gen_ai.operation.name': 'chat',
              'sentry.op': 'gen_ai.chat',
              'gen_ai.system': 'openai',
              'gen_ai.request.model': 'gpt-3.5-turbo',
              'gen_ai.request.temperature': 0.7,
              'gen_ai.response.model': 'gpt-3.5-turbo',
              'gen_ai.response.id': 'chatcmpl-mock123',
              'gen_ai.usage.input_tokens': 10,
              'gen_ai.usage.output_tokens': 15,
              'gen_ai.usage.total_tokens': 25,
              'gen_ai.response.finish_reasons': '["stop"]',
            }),
            description: 'chat gpt-3.5-turbo',
            op: 'gen_ai.chat',
            origin: 'manual',
          }),
        ]),
      );
    })
    .start();
  await runner.makeRequest('get', '/');
  await runner.completed();
});
```
Lines changed: 6 additions & 0 deletions
```json
{
  "name": "worker-name",
  "compatibility_date": "2025-06-17",
  "main": "index.ts",
  "compatibility_flags": ["nodejs_compat"]
}
```

dev-packages/cloudflare-integration-tests/vite.config.mts

Lines changed: 6 additions & 0 deletions
```diff
@@ -22,6 +22,12 @@ export default defineConfig({
     // already run in their own processes. We use threads instead because the
     // overhead is significantly less.
     pool: 'threads',
+    // Run tests sequentially to avoid port conflicts with wrangler dev processes
+    poolOptions: {
+      threads: {
+        singleThread: true,
+      },
+    },
     reporters: process.env.DEBUG
       ? ['default', { summary: false }]
       : process.env.GITHUB_ACTIONS
```

packages/cloudflare/src/index.ts

Lines changed: 1 addition & 0 deletions
```diff
@@ -69,6 +69,7 @@ export {
   functionToStringIntegration,
   // eslint-disable-next-line deprecation/deprecation
   inboundFiltersIntegration,
+  instrumentOpenAiClient,
   eventFiltersIntegration,
   linkedErrorsIntegration,
   requestDataIntegration,
```

packages/vercel-edge/src/index.ts

Lines changed: 1 addition & 0 deletions
```diff
@@ -69,6 +69,7 @@ export {
   functionToStringIntegration,
   // eslint-disable-next-line deprecation/deprecation
   inboundFiltersIntegration,
+  instrumentOpenAiClient,
   eventFiltersIntegration,
   linkedErrorsIntegration,
   requestDataIntegration,
```
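With this export in place, the same wrapper is available from `@sentry/vercel-edge`. A minimal sketch of wrapping a real OpenAI client in an edge handler, assuming the same `recordInputs`/`recordOutputs` options as the Cloudflare example in the commit message (the handler shape below is illustrative, not part of this commit):

```ts
import * as Sentry from '@sentry/vercel-edge';
import OpenAI from 'openai';

// Wrap the real OpenAI client so chat/responses calls produce gen_ai spans.
const openai = Sentry.instrumentOpenAiClient(new OpenAI(), {
  recordInputs: true,
  recordOutputs: true,
});

// Illustrative edge handler; the surrounding framework wiring is assumed.
export async function GET(): Promise<Response> {
  const completion = await openai.chat?.completions?.create({
    model: 'gpt-3.5-turbo',
    messages: [{ role: 'user', content: 'What is the capital of France?' }],
  });

  return new Response(JSON.stringify(completion), {
    headers: { 'content-type': 'application/json' },
  });
}
```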
