Skip to content

Commit 6966214

Browse files
committed
add tests, refactor some types
1 parent c6e8561 commit 6966214

File tree

15 files changed

+359
-295
lines changed

15 files changed

+359
-295
lines changed
Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
1+
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

// Set environment variable to trigger the fourth test
process.env.TEST_OPTIONS = '1';

// Explicitly opt in to recording inputs/outputs, independent of sendDefaultPii.
const openAiIntegration = Sentry.openAIIntegration({
  recordInputs: true,
  recordOutputs: true,
});

Sentry.init({
  dsn: 'https://[email protected]/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: false,
  transport: loggingTransport,
  integrations: [openAiIntegration],
});
Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

// With sendDefaultPii enabled, the OpenAI integration records request
// messages and response text by default — no explicit options needed.
const initOptions = {
  dsn: 'https://[email protected]/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: true,
  transport: loggingTransport,
  integrations: [Sentry.openAIIntegration()],
};

Sentry.init(initOptions);
Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

// Default-PII-off setup: the OpenAI integration should capture telemetry
// metadata only (no request messages or response text).
const initOptions = {
  dsn: 'https://[email protected]/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: false,
  transport: loggingTransport,
  integrations: [Sentry.openAIIntegration()],
};

Sentry.init(initOptions);
Lines changed: 116 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,116 @@
1+
import { instrumentOpenAiClient } from '@sentry/core';
2+
import * as Sentry from '@sentry/node';
3+
4+
class MockOpenAI {
5+
constructor(config) {
6+
this.apiKey = config.apiKey;
7+
8+
this.chat = {
9+
completions: {
10+
create: async params => {
11+
// Simulate processing time
12+
await new Promise(resolve => setTimeout(resolve, 10));
13+
14+
if (params.model === 'error-model') {
15+
const error = new Error('Model not found');
16+
error.status = 404;
17+
error.headers = { 'x-request-id': 'mock-request-123' };
18+
throw error;
19+
}
20+
21+
return {
22+
id: 'chatcmpl-mock123',
23+
object: 'chat.completion',
24+
created: 1677652288,
25+
model: params.model,
26+
system_fingerprint: 'fp_44709d6fcb',
27+
choices: [
28+
{
29+
index: 0,
30+
message: {
31+
role: 'assistant',
32+
content: 'Hello from OpenAI mock!',
33+
},
34+
finish_reason: 'stop',
35+
},
36+
],
37+
usage: {
38+
prompt_tokens: 10,
39+
completion_tokens: 15,
40+
total_tokens: 25,
41+
},
42+
};
43+
},
44+
},
45+
};
46+
47+
this.responses = {
48+
create: async params => {
49+
await new Promise(resolve => setTimeout(resolve, 10));
50+
51+
return {
52+
id: 'resp_mock456',
53+
object: 'response',
54+
created: 1677652290,
55+
model: params.model,
56+
input_text: params.input,
57+
output_text: `Response to: ${params.input}`,
58+
finish_reason: 'stop',
59+
usage: {
60+
input_tokens: 5,
61+
output_tokens: 8,
62+
total_tokens: 13,
63+
},
64+
};
65+
},
66+
};
67+
}
68+
}
69+
70+
/**
 * Drives the instrumented mock client inside a root span so the
 * integration tests can assert on the spans of the resulting transaction.
 */
async function run() {
  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
    const mockClient = new MockOpenAI({
      apiKey: 'mock-api-key',
    });

    const scopeClient = Sentry.getCurrentScope().getClient();
    const piiEnabled = scopeClient?.getOptions().sendDefaultPii || false;

    // TEST_OPTIONS=1 forces explicit recording; otherwise mirror sendDefaultPii.
    let options;
    if (process.env.TEST_OPTIONS === '1') {
      options = { recordInputs: true, recordOutputs: true };
    } else {
      options = { recordInputs: piiEnabled, recordOutputs: piiEnabled };
    }

    const client = instrumentOpenAiClient(mockClient, options);

    // First test: basic chat completion
    await client.chat.completions.create({
      model: 'gpt-3.5-turbo',
      messages: [
        { role: 'system', content: 'You are a helpful assistant.' },
        { role: 'user', content: 'What is the capital of France?' },
      ],
      temperature: 0.7,
      max_tokens: 100,
    });

    // Second test: responses API
    await client.responses.create({
      model: 'gpt-3.5-turbo',
      input: 'Translate this to French: Hello',
      instructions: 'You are a translator',
    });

    // Third test: error handling
    try {
      await client.chat.completions.create({
        model: 'error-model',
        messages: [{ role: 'user', content: 'This will fail' }],
      });
    } catch {
      // Error is expected and handled
    }
  });
}

run();
Lines changed: 182 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,182 @@
1+
import { afterAll, describe, expect } from 'vitest';
2+
import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../utils/runner';
3+
4+
describe('OpenAI integration', () => {
  afterAll(() => {
    cleanupChildProcesses();
  });

  // Expected transaction when sendDefaultPii: false — only telemetry
  // metadata (models, ids, token usage) is captured; request messages and
  // response text are omitted.
  const EXPECTED_TRANSACTION_DEFAULT_PII_FALSE = {
    transaction: 'main',
    spans: expect.arrayContaining([
      // First span - basic chat completion without PII
      expect.objectContaining({
        data: {
          'gen_ai.operation.name': 'chat',
          'sentry.op': 'gen_ai.chat',
          'sentry.origin': 'manual',
          'gen_ai.system': 'openai',
          'gen_ai.request.model': 'gpt-3.5-turbo',
          'gen_ai.request.temperature': 0.7,
          'gen_ai.response.model': 'gpt-3.5-turbo',
          'gen_ai.response.id': 'chatcmpl-mock123',
          'gen_ai.response.finish_reasons': '["stop"]',
          'gen_ai.usage.input_tokens': 10,
          'gen_ai.usage.output_tokens': 15,
          'gen_ai.usage.total_tokens': 25,
          'openai.response.id': 'chatcmpl-mock123',
          'openai.response.model': 'gpt-3.5-turbo',
          'openai.response.timestamp': '2023-03-01T06:31:28.000Z',
          'openai.usage.completion_tokens': 15,
          'openai.usage.prompt_tokens': 10,
        },
        description: 'chat gpt-3.5-turbo',
        op: 'gen_ai.chat',
        origin: 'manual',
        status: 'ok',
      }),
      // Second span - responses API
      expect.objectContaining({
        data: {
          'gen_ai.operation.name': 'chat',
          'sentry.op': 'gen_ai.chat',
          'sentry.origin': 'manual',
          'gen_ai.system': 'openai',
          'gen_ai.request.model': 'gpt-3.5-turbo',
          'gen_ai.response.model': 'gpt-3.5-turbo',
          'gen_ai.response.id': 'resp_mock456',
          'gen_ai.usage.input_tokens': 5,
          'gen_ai.usage.output_tokens': 8,
          'gen_ai.usage.total_tokens': 13,
          'openai.response.id': 'resp_mock456',
          'openai.response.model': 'gpt-3.5-turbo',
          'openai.usage.completion_tokens': 8,
          'openai.usage.prompt_tokens': 5,
        },
        description: 'chat gpt-3.5-turbo',
        op: 'gen_ai.chat',
        origin: 'manual',
        status: 'ok',
      }),
      // Third span - error handling
      expect.objectContaining({
        data: {
          'gen_ai.operation.name': 'chat',
          'sentry.op': 'gen_ai.chat',
          'sentry.origin': 'manual',
          'gen_ai.system': 'openai',
          'gen_ai.request.model': 'error-model',
        },
        description: 'chat error-model',
        op: 'gen_ai.chat',
        origin: 'manual',
        status: 'unknown_error',
      }),
    ]),
  };

  // Expected transaction when sendDefaultPii: true — in addition to the
  // metadata above, request messages and response text are recorded.
  const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE = {
    transaction: 'main',
    spans: expect.arrayContaining([
      // First span - basic chat completion with PII
      expect.objectContaining({
        data: {
          'gen_ai.operation.name': 'chat',
          'sentry.op': 'gen_ai.chat',
          'sentry.origin': 'manual',
          'gen_ai.system': 'openai',
          'gen_ai.request.model': 'gpt-3.5-turbo',
          'gen_ai.request.temperature': 0.7,
          'gen_ai.request.messages':
            '[{"role":"system","content":"You are a helpful assistant."},{"role":"user","content":"What is the capital of France?"}]',
          'gen_ai.response.model': 'gpt-3.5-turbo',
          'gen_ai.response.id': 'chatcmpl-mock123',
          'gen_ai.response.finish_reasons': '["stop"]',
          'gen_ai.response.text': '["Hello from OpenAI mock!"]',
          'gen_ai.usage.input_tokens': 10,
          'gen_ai.usage.output_tokens': 15,
          'gen_ai.usage.total_tokens': 25,
          'openai.response.id': 'chatcmpl-mock123',
          'openai.response.model': 'gpt-3.5-turbo',
          'openai.response.timestamp': '2023-03-01T06:31:28.000Z',
          'openai.usage.completion_tokens': 15,
          'openai.usage.prompt_tokens': 10,
        },
        description: 'chat gpt-3.5-turbo',
        op: 'gen_ai.chat',
        origin: 'manual',
        status: 'ok',
      }),
      // Second span - responses API with PII (no messages attribute for this API)
      expect.objectContaining({
        data: {
          'gen_ai.operation.name': 'chat',
          'sentry.op': 'gen_ai.chat',
          'sentry.origin': 'manual',
          'gen_ai.system': 'openai',
          'gen_ai.request.model': 'gpt-3.5-turbo',
          'gen_ai.response.text': '["Response to: Translate this to French: Hello"]',
          'gen_ai.response.model': 'gpt-3.5-turbo',
          'gen_ai.response.id': 'resp_mock456',
          'gen_ai.usage.input_tokens': 5,
          'gen_ai.usage.output_tokens': 8,
          'gen_ai.usage.total_tokens': 13,
          'openai.response.id': 'resp_mock456',
          'openai.response.model': 'gpt-3.5-turbo',
          'openai.usage.completion_tokens': 8,
          'openai.usage.prompt_tokens': 5,
        },
        description: 'chat gpt-3.5-turbo',
        op: 'gen_ai.chat',
        origin: 'manual',
        status: 'ok',
      }),
      // Third span - error handling with PII
      expect.objectContaining({
        data: {
          'gen_ai.operation.name': 'chat',
          'sentry.op': 'gen_ai.chat',
          'sentry.origin': 'manual',
          'gen_ai.system': 'openai',
          'gen_ai.request.model': 'error-model',
          'gen_ai.request.messages': '[{"role":"user","content":"This will fail"}]',
        },
        description: 'chat error-model',
        op: 'gen_ai.chat',
        origin: 'manual',
        status: 'unknown_error',
      }),
    ]),
  };

  // Expected transaction when recordInputs/recordOutputs are passed
  // explicitly to the integration (instrument-with-options.mjs), even
  // though sendDefaultPii is false.
  const EXPECTED_TRANSACTION_WITH_OPTIONS = {
    transaction: 'main',
    spans: expect.arrayContaining([
      // Check that custom options are respected
      expect.objectContaining({
        data: expect.objectContaining({
          'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true
          'gen_ai.response.text': expect.any(String), // Should include response text when recordOutputs: true
        }),
      }),
    ]),
  };

  createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument.mjs', (createRunner, test) => {
    test('creates openai related spans with sendDefaultPii: false', async () => {
      await createRunner().expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_FALSE }).start().completed();
    });
  });

  createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument-with-pii.mjs', (createRunner, test) => {
    test('creates openai related spans with sendDefaultPii: true', async () => {
      await createRunner().expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_TRUE }).start().completed();
    });
  });

  createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument-with-options.mjs', (createRunner, test) => {
    test('creates openai related spans with custom options', async () => {
      await createRunner().expect({ transaction: EXPECTED_TRANSACTION_WITH_OPTIONS }).start().completed();
    });
  });
});

dev-packages/rollup-utils/npmHelpers.mjs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -93,7 +93,7 @@ export function makeBaseNPMConfig(options = {}) {
9393
}
9494

9595
return true;
96-
}
96+
},
9797
},
9898

9999
plugins: [nodeResolvePlugin, sucrasePlugin, debugBuildStatementReplacePlugin, rrwebBuildPlugin, cleanupPlugin],

packages/astro/src/index.server.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -84,6 +84,7 @@ export {
8484
nodeContextIntegration,
8585
onUncaughtExceptionIntegration,
8686
onUnhandledRejectionIntegration,
87+
openAIIntegration,
8788
parameterize,
8889
postgresIntegration,
8990
postgresJsIntegration,

packages/aws-serverless/src/index.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -51,6 +51,7 @@ export {
5151
nativeNodeFetchIntegration,
5252
onUncaughtExceptionIntegration,
5353
onUnhandledRejectionIntegration,
54+
openAIIntegration,
5455
modulesIntegration,
5556
contextLinesIntegration,
5657
nodeContextIntegration,

packages/bun/src/index.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -71,6 +71,7 @@ export {
7171
nativeNodeFetchIntegration,
7272
onUncaughtExceptionIntegration,
7373
onUnhandledRejectionIntegration,
74+
openAIIntegration,
7475
modulesIntegration,
7576
contextLinesIntegration,
7677
nodeContextIntegration,

packages/core/src/utils/openai-attributes.ts

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,11 @@ export const GEN_AI_SYSTEM_ATTRIBUTE = 'gen_ai.system';
2020
*/
2121
export const GEN_AI_REQUEST_MODEL_ATTRIBUTE = 'gen_ai.request.model';
2222

23+
/**
24+
* Whether streaming was enabled for the request
25+
*/
26+
export const GEN_AI_REQUEST_STREAM_ATTRIBUTE = 'gen_ai.request.stream';
27+
2328
/**
2429
* The temperature setting for the model request
2530
*/

0 commit comments

Comments
 (0)