Skip to content
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
import express from 'express';
import { AzureOpenAI } from 'openai';

/**
 * Spins up an Express app that mimics the Azure OpenAI chat-completions
 * endpoint and resolves with the listening server (bound to a random free port).
 */
function startMockOpenAiServer() {
  const app = express();
  app.use(express.json());

  app.post('/azureopenai/deployments/:model/chat/completions', (req, res) => {
    // Canned chat.completion payload; echoes back whatever model the client sent.
    const payload = {
      id: 'chatcmpl-mock123',
      object: 'chat.completion',
      created: 1677652288,
      model: req.body.model,
      system_fingerprint: 'fp_44709d6fcb',
      choices: [
        {
          index: 0,
          message: { role: 'assistant', content: 'Hello from OpenAI mock!' },
          finish_reason: 'stop',
        },
      ],
      usage: { prompt_tokens: 10, completion_tokens: 15, total_tokens: 25 },
    };
    res.send(payload);
  });

  // Resolve only once the server is actually listening, so callers can
  // immediately read the assigned port from server.address().
  return new Promise(resolve => {
    const server = app.listen(0, () => resolve(server));
  });
}

/**
 * Starts the mock Azure OpenAI server, issues a single chat-completion
 * request through the (instrumented) AzureOpenAI client, prints the raw
 * response as JSON, and shuts the server down.
 */
async function run() {
  const server = await startMockOpenAiServer();

  try {
    const client = new AzureOpenAI({
      // For a TCP listener, server.address() returns an AddressInfo with the port.
      baseURL: `http://localhost:${server.address().port}/azureopenai`,
      apiKey: 'mock-api-key',
      apiVersion: '2024-02-15-preview',
    });

    const response = await client.chat.completions.create({
      model: 'gpt-3.5-turbo',
      messages: [
        { role: 'system', content: 'You are a helpful assistant.' },
        { role: 'user', content: 'What is the capital of France?' },
      ],
      temperature: 0.7,
      max_tokens: 100,
    });

    // eslint-disable-next-line no-console
    console.log(JSON.stringify(response));
  } finally {
    // Always release the port — without this, a failed request would leave
    // the server open and the process hanging.
    server.close();
  }
}

run().catch(error => {
  // Surface failures with a non-zero exit code instead of an unhandled rejection.
  // eslint-disable-next-line no-console
  console.error(error);
  process.exitCode = 1;
});
47 changes: 47 additions & 0 deletions dev-packages/node-integration-tests/suites/tracing/openai/test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -501,6 +501,53 @@ describe('OpenAI integration', () => {
});
});

// Verifies that the OpenAI integration instruments the AzureOpenAI client
// (not only the plain OpenAI export), in both ESM and CJS module modes.
createEsmAndCjsTests(__dirname, 'scenario-azure-openai.mjs', 'instrument.mjs', (createRunner, test) => {
test('it works with Azure OpenAI', async () => {
await createRunner()
// First the span that our mock express server is emitting, unrelated to this test
.expect({
transaction: {
transaction: 'POST /azureopenai/deployments/:model/chat/completions',
},
})
// Then the gen_ai transaction produced by the instrumented AzureOpenAI chat call.
.expect({
transaction: {
transaction: 'chat gpt-3.5-turbo',
contexts: {
trace: {
span_id: expect.any(String),
trace_id: expect.any(String),
data: {
'gen_ai.operation.name': 'chat',
'sentry.op': 'gen_ai.chat',
'sentry.origin': 'auto.ai.openai',
'gen_ai.system': 'openai',
'gen_ai.request.model': 'gpt-3.5-turbo',
'gen_ai.request.temperature': 0.7,
'gen_ai.response.model': 'gpt-3.5-turbo',
'gen_ai.response.id': 'chatcmpl-mock123',
'gen_ai.response.finish_reasons': '["stop"]',
// Token counts mirror the `usage` block returned by the mock server.
'gen_ai.usage.input_tokens': 10,
'gen_ai.usage.output_tokens': 15,
'gen_ai.usage.total_tokens': 25,
'openai.response.id': 'chatcmpl-mock123',
'openai.response.model': 'gpt-3.5-turbo',
// ISO rendering of the mock's `created: 1677652288` epoch seconds.
'openai.response.timestamp': '2023-03-01T06:31:28.000Z',
'openai.usage.completion_tokens': 15,
'openai.usage.prompt_tokens': 10,
},
op: 'gen_ai.chat',
origin: 'auto.ai.openai',
status: 'ok',
},
},
},
})
.start()
.completed();
});
});

createEsmAndCjsTests(
__dirname,
'truncation/scenario-message-truncation-completions.mjs',
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
import express from 'express';
import { AzureOpenAI } from 'openai';

/**
 * Spins up an Express app that mimics the Azure OpenAI chat-completions
 * endpoint and resolves with the listening server (bound to a random free port).
 */
function startMockOpenAiServer() {
  const app = express();
  app.use(express.json());

  app.post('/azureopenai/deployments/:model/chat/completions', (req, res) => {
    // Canned chat.completion payload; echoes back whatever model the client sent.
    const payload = {
      id: 'chatcmpl-mock123',
      object: 'chat.completion',
      created: 1677652288,
      model: req.body.model,
      system_fingerprint: 'fp_44709d6fcb',
      choices: [
        {
          index: 0,
          message: { role: 'assistant', content: 'Hello from OpenAI mock!' },
          finish_reason: 'stop',
        },
      ],
      usage: { prompt_tokens: 10, completion_tokens: 15, total_tokens: 25 },
    };
    res.send(payload);
  });

  // Resolve only once the server is actually listening, so callers can
  // immediately read the assigned port from server.address().
  return new Promise(resolve => {
    const server = app.listen(0, () => resolve(server));
  });
}

/**
 * Starts the mock Azure OpenAI server, issues a single chat-completion
 * request through the (instrumented) AzureOpenAI client, prints the raw
 * response as JSON, and shuts the server down.
 */
async function run() {
  const server = await startMockOpenAiServer();

  try {
    const client = new AzureOpenAI({
      // For a TCP listener, server.address() returns an AddressInfo with the port.
      baseURL: `http://localhost:${server.address().port}/azureopenai`,
      apiKey: 'mock-api-key',
      apiVersion: '2024-02-15-preview',
    });

    const response = await client.chat.completions.create({
      model: 'gpt-3.5-turbo',
      messages: [
        { role: 'system', content: 'You are a helpful assistant.' },
        { role: 'user', content: 'What is the capital of France?' },
      ],
      temperature: 0.7,
      max_tokens: 100,
    });

    // eslint-disable-next-line no-console
    console.log(JSON.stringify(response));
  } finally {
    // Always release the port — without this, a failed request would leave
    // the server open and the process hanging.
    server.close();
  }
}

run().catch(error => {
  // Surface failures with a non-zero exit code instead of an unhandled rejection.
  // eslint-disable-next-line no-console
  console.error(error);
  process.exitCode = 1;
});
Original file line number Diff line number Diff line change
Expand Up @@ -562,4 +562,62 @@ describe('OpenAI integration (V6)', () => {
},
},
);

// Same Azure OpenAI coverage as the base suite, but pinned to openai v6 via
// additionalDependencies to exercise the v6 client code path.
createEsmAndCjsTests(
__dirname,
'scenario-azure-openai.mjs',
'instrument.mjs',
(createRunner, test) => {
test('it works with Azure OpenAI (v6)', async () => {
await createRunner()
// First the span that our mock express server is emitting, unrelated to this test
.expect({
transaction: {
transaction: 'POST /azureopenai/deployments/:model/chat/completions',
},
})
// Then the gen_ai transaction produced by the instrumented AzureOpenAI chat call.
.expect({
transaction: {
transaction: 'chat gpt-3.5-turbo',
contexts: {
trace: {
span_id: expect.any(String),
trace_id: expect.any(String),
data: {
'gen_ai.operation.name': 'chat',
'sentry.op': 'gen_ai.chat',
'sentry.origin': 'auto.ai.openai',
'gen_ai.system': 'openai',
'gen_ai.request.model': 'gpt-3.5-turbo',
'gen_ai.request.temperature': 0.7,
'gen_ai.response.model': 'gpt-3.5-turbo',
'gen_ai.response.id': 'chatcmpl-mock123',
'gen_ai.response.finish_reasons': '["stop"]',
// Token counts mirror the `usage` block returned by the mock server.
'gen_ai.usage.input_tokens': 10,
'gen_ai.usage.output_tokens': 15,
'gen_ai.usage.total_tokens': 25,
'openai.response.id': 'chatcmpl-mock123',
'openai.response.model': 'gpt-3.5-turbo',
// ISO rendering of the mock's `created: 1677652288` epoch seconds.
'openai.response.timestamp': '2023-03-01T06:31:28.000Z',
'openai.usage.completion_tokens': 15,
'openai.usage.prompt_tokens': 10,
},
op: 'gen_ai.chat',
origin: 'auto.ai.openai',
status: 'ok',
},
},
},
})
.start()
.completed();
});
},
{
additionalDependencies: {
openai: '6.0.0',
express: 'latest',
},
},
);
});
14 changes: 11 additions & 3 deletions packages/node/src/integrations/tracing/openai/instrumentation.ts
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ export interface OpenAiIntegration extends Integration {
interface PatchedModuleExports {
[key: string]: unknown;
OpenAI: abstract new (...args: unknown[]) => OpenAiClient;
AzureOpenAI: abstract new (...args: unknown[]) => OpenAiClient;
}

/**
Expand Down Expand Up @@ -59,7 +60,14 @@ export class SentryOpenAiInstrumentation extends InstrumentationBase<Instrumenta
* Core patch logic applying instrumentation to the OpenAI client constructor.
*/
// Wraps both client constructors exported by the module, so that plain
// `OpenAI` and `AzureOpenAI` instances are instrumented alike.
private _patch(exports: PatchedModuleExports): PatchedModuleExports | void {
// NOTE(review): `Original` is never used in this method — `_patchClient`
// reads the export by key itself. This looks like leftover from a previous
// revision; confirm and remove.
const Original = exports.OpenAI;
let result = exports;
result = this._patchClient(result, 'OpenAI');
result = this._patchClient(result, 'AzureOpenAI');
return result;
}

private _patchClient(exports: PatchedModuleExports, exportKey: 'OpenAI' | 'AzureOpenAI'): PatchedModuleExports {
const Original = exports[exportKey];

const WrappedOpenAI = function (this: unknown, ...args: unknown[]) {
// Check if wrapping should be skipped (e.g., when LangChain is handling instrumentation)
Expand Down Expand Up @@ -97,10 +105,10 @@ export class SentryOpenAiInstrumentation extends InstrumentationBase<Instrumenta
// Constructor replacement - handle read-only properties
// The OpenAI property might have only a getter, so use defineProperty
try {
exports.OpenAI = WrappedOpenAI;
exports[exportKey] = WrappedOpenAI;
} catch (error) {
// If direct assignment fails, override the property descriptor
Object.defineProperty(exports, 'OpenAI', {
Object.defineProperty(exports, exportKey, {
value: WrappedOpenAI,
writable: true,
configurable: true,
Expand Down
Loading