Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions dev-packages/node-integration-tests/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,7 @@
"nock": "^13.5.5",
"node-cron": "^3.0.3",
"node-schedule": "^2.1.1",
"openai": "5.18.1",
"pg": "8.16.0",
"postgres": "^3.4.7",
"proxy": "^2.1.1",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,7 @@ describe('OpenAI Tool Calls integration', () => {
data: {
'gen_ai.operation.name': 'chat',
'sentry.op': 'gen_ai.chat',
'sentry.origin': 'manual',
'sentry.origin': 'auto.function.openai',
'gen_ai.system': 'openai',
'gen_ai.request.model': 'gpt-4',
'gen_ai.request.available_tools': WEATHER_TOOL_DEFINITION,
Expand All @@ -83,15 +83,15 @@ describe('OpenAI Tool Calls integration', () => {
},
description: 'chat gpt-4',
op: 'gen_ai.chat',
origin: 'manual',
origin: 'auto.function.openai',
status: 'ok',
}),
// Second span - chat completion with tools and streaming
expect.objectContaining({
data: {
'gen_ai.operation.name': 'chat',
'sentry.op': 'gen_ai.chat',
'sentry.origin': 'manual',
'sentry.origin': 'auto.function.openai',
'gen_ai.system': 'openai',
'gen_ai.request.model': 'gpt-4',
'gen_ai.request.stream': true,
Expand All @@ -111,15 +111,15 @@ describe('OpenAI Tool Calls integration', () => {
},
description: 'chat gpt-4 stream-response',
op: 'gen_ai.chat',
origin: 'manual',
origin: 'auto.function.openai',
status: 'ok',
}),
// Third span - responses API with tools (non-streaming)
expect.objectContaining({
data: {
'gen_ai.operation.name': 'responses',
'sentry.op': 'gen_ai.responses',
'sentry.origin': 'manual',
'sentry.origin': 'auto.function.openai',
'gen_ai.system': 'openai',
'gen_ai.request.model': 'gpt-4',
'gen_ai.request.available_tools': WEATHER_TOOL_DEFINITION,
Expand All @@ -137,15 +137,15 @@ describe('OpenAI Tool Calls integration', () => {
},
description: 'responses gpt-4',
op: 'gen_ai.responses',
origin: 'manual',
origin: 'auto.function.openai',
status: 'ok',
}),
// Fourth span - responses API with tools and streaming
expect.objectContaining({
data: {
'gen_ai.operation.name': 'responses',
'sentry.op': 'gen_ai.responses',
'sentry.origin': 'manual',
'sentry.origin': 'auto.function.openai',
'gen_ai.system': 'openai',
'gen_ai.request.model': 'gpt-4',
'gen_ai.request.stream': true,
Expand All @@ -165,7 +165,7 @@ describe('OpenAI Tool Calls integration', () => {
},
description: 'responses gpt-4 stream-response',
op: 'gen_ai.responses',
origin: 'manual',
origin: 'auto.function.openai',
status: 'ok',
}),
]),
Expand All @@ -179,7 +179,7 @@ describe('OpenAI Tool Calls integration', () => {
data: {
'gen_ai.operation.name': 'chat',
'sentry.op': 'gen_ai.chat',
'sentry.origin': 'manual',
'sentry.origin': 'auto.function.openai',
'gen_ai.system': 'openai',
'gen_ai.request.model': 'gpt-4',
'gen_ai.request.messages': '[{"role":"user","content":"What is the weather like in Paris today?"}]',
Expand All @@ -200,15 +200,15 @@ describe('OpenAI Tool Calls integration', () => {
},
description: 'chat gpt-4',
op: 'gen_ai.chat',
origin: 'manual',
origin: 'auto.function.openai',
status: 'ok',
}),
// Second span - chat completion with tools and streaming with PII
expect.objectContaining({
data: {
'gen_ai.operation.name': 'chat',
'sentry.op': 'gen_ai.chat',
'sentry.origin': 'manual',
'sentry.origin': 'auto.function.openai',
'gen_ai.system': 'openai',
'gen_ai.request.model': 'gpt-4',
'gen_ai.request.stream': true,
Expand All @@ -230,15 +230,15 @@ describe('OpenAI Tool Calls integration', () => {
},
description: 'chat gpt-4 stream-response',
op: 'gen_ai.chat',
origin: 'manual',
origin: 'auto.function.openai',
status: 'ok',
}),
// Third span - responses API with tools (non-streaming) with PII
expect.objectContaining({
data: {
'gen_ai.operation.name': 'responses',
'sentry.op': 'gen_ai.responses',
'sentry.origin': 'manual',
'sentry.origin': 'auto.function.openai',
'gen_ai.system': 'openai',
'gen_ai.request.model': 'gpt-4',
'gen_ai.request.messages': '[{"role":"user","content":"What is the weather like in Paris today?"}]',
Expand All @@ -258,15 +258,15 @@ describe('OpenAI Tool Calls integration', () => {
},
description: 'responses gpt-4',
op: 'gen_ai.responses',
origin: 'manual',
origin: 'auto.function.openai',
status: 'ok',
}),
// Fourth span - responses API with tools and streaming with PII
expect.objectContaining({
data: {
'gen_ai.operation.name': 'responses',
'sentry.op': 'gen_ai.responses',
'sentry.origin': 'manual',
'sentry.origin': 'auto.function.openai',
'gen_ai.system': 'openai',
'gen_ai.request.model': 'gpt-4',
'gen_ai.request.stream': true,
Expand All @@ -288,7 +288,7 @@ describe('OpenAI Tool Calls integration', () => {
},
description: 'responses gpt-4 stream-response',
op: 'gen_ai.responses',
origin: 'manual',
origin: 'auto.function.openai',
status: 'ok',
}),
]),
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
import express from 'express';
import OpenAI from 'openai';

// Port the in-process mock OpenAI server listens on (also used to build the client baseURL).
const PORT = 3333;

/**
 * Starts an in-process Express server that mimics the OpenAI chat-completions
 * endpoint, returning a fixed canned response that echoes the requested model.
 *
 * @returns {import('http').Server} the listening HTTP server (caller must close it).
 */
function startMockOpenAiServer() {
  const app = express();
  app.use(express.json());

  // Canned chat-completion reply; only `model` varies, echoed from the request body.
  const handleChatCompletion = (req, res) => {
    const payload = {
      id: 'chatcmpl-mock123',
      object: 'chat.completion',
      created: 1677652288,
      model: req.body.model,
      system_fingerprint: 'fp_44709d6fcb',
      choices: [
        {
          index: 0,
          message: { role: 'assistant', content: 'Hello from OpenAI mock!' },
          finish_reason: 'stop',
        },
      ],
      usage: { prompt_tokens: 10, completion_tokens: 15, total_tokens: 25 },
    };
    res.send(payload);
  };

  app.post('/openai/chat/completions', handleChatCompletion);

  return app.listen(PORT);
}

/**
 * Scenario entry point: boots the mock OpenAI server, issues one chat-completion
 * request through the real OpenAI SDK pointed at the mock, prints the raw
 * response as JSON, and shuts the server down.
 */
async function run() {
  const server = startMockOpenAiServer();

  try {
    const client = new OpenAI({
      baseURL: `http://localhost:${PORT}/openai`,
      apiKey: 'mock-api-key',
    });

    const response = await client.chat.completions.create({
      model: 'gpt-3.5-turbo',
      messages: [
        { role: 'system', content: 'You are a helpful assistant.' },
        { role: 'user', content: 'What is the capital of France?' },
      ],
      temperature: 0.7,
      max_tokens: 100,
    });

    // eslint-disable-next-line no-console
    console.log(JSON.stringify(response));
  } finally {
    // Close the server even if the request throws; otherwise the open
    // listener keeps the Node process alive and the scenario hangs.
    server.close();
  }
}

// Don't leave the top-level promise floating: log scenario failures and
// exit nonzero so the test harness sees the failure.
run().catch(err => {
  // eslint-disable-next-line no-console
  console.error(err);
  process.exitCode = 1;
});
Loading
Loading