Skip to content

Commit db55a48

Browse files
committed
feat(browser): Expose AI instrumentation methods
1 parent 985873e commit db55a48

File tree

10 files changed

+427
-0
lines changed

10 files changed

+427
-0
lines changed
Lines changed: 55 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,55 @@
1+
// Mock Anthropic client used by the browser integration tests.
// Implements just enough of the SDK surface for the instrumentation to wrap.
export class MockAnthropic {
  constructor(config) {
    this.apiKey = config.apiKey;

    // Small artificial delay so instrumented spans get a non-zero duration.
    const simulateLatency = () => new Promise(resolve => setTimeout(resolve, 10));

    // Shape of the 404 the real client raises for an unknown model.
    const buildNotFoundError = () => {
      const error = new Error('Model not found');
      error.status = 404;
      error.headers = { 'x-request-id': 'mock-request-123' };
      return error;
    };

    // Main focus: messages.create functionality
    this.messages = {
      create: async (...args) => {
        const [params] = args;
        await simulateLatency();

        if (params.model === 'error-model') {
          throw buildNotFoundError();
        }

        return {
          id: 'msg_mock123',
          type: 'message',
          role: 'assistant',
          model: params.model,
          content: [
            {
              type: 'text',
              text: 'Hello from Anthropic mock!',
            },
          ],
          stop_reason: 'end_turn',
          stop_sequence: null,
          usage: {
            input_tokens: 10,
            output_tokens: 15,
            cache_creation_input_tokens: 0,
            cache_read_input_tokens: 0,
          },
        };
      },
      countTokens: async (..._args) => ({ id: 'mock', type: 'model', model: 'mock', input_tokens: 0 }),
    };

    // Minimal implementations for required interface compliance
    this.models = {
      list: async (..._args) => ({ id: 'mock', type: 'model', model: 'mock' }),
      get: async (..._args) => ({ id: 'mock', type: 'model', model: 'mock' }),
    };

    this.completions = {
      create: async (..._args) => ({ id: 'mock', type: 'completion', model: 'mock' }),
    };
  }
}
55+
Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
import * as Sentry from '@sentry/browser';
import { MockAnthropic } from './mocks.js';

// Expose the SDK on the window for the test harness.
window.Sentry = Sentry;

Sentry.init({
  dsn: 'https://[email protected]/1337',
  tracesSampleRate: 1,
});

// Wrap the mock client so the manual AI instrumentation records spans.
const client = Sentry.instrumentAnthropicAiClient(new MockAnthropic({ apiKey: 'mock-api-key' }));

// Fire one request to verify the instrumentation does not crash the browser;
// the wrapper creates the gen_ai span automatically.
client.messages.create({
  model: 'claude-3-haiku-20240307',
  messages: [{ role: 'user', content: 'What is the capital of France?' }],
  temperature: 0.7,
  max_tokens: 100,
});
25+
Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,39 @@
1+
import { expect } from '@playwright/test';
import { sentryTest } from '../../../../utils/fixtures';
import { envelopeRequestParser, shouldSkipTracingTest, waitForTransactionRequest } from '../../../../utils/helpers';

// These tests are not exhaustive: the instrumentation is already covered by
// the node integration tests. Here we only verify that it does not crash in
// the browser and that gen_ai transactions are sent.

sentryTest('manual Anthropic instrumentation sends gen_ai transactions', async ({ getLocalTestUrl, page }) => {
  if (shouldSkipTracingTest()) {
    sentryTest.skip();
  }

  // Register the listener before navigating so the transaction is not missed.
  const pendingTransaction = waitForTransactionRequest(page, event =>
    Boolean(event.transaction?.includes('claude-3-haiku-20240307')),
  );

  const url = await getLocalTestUrl({ testDir: __dirname });
  await page.goto(url);

  const transactionEvent = envelopeRequestParser(await pendingTransaction);

  // Verify it's a gen_ai transaction
  expect(transactionEvent.transaction).toBe('messages claude-3-haiku-20240307');
  expect(transactionEvent.contexts?.trace?.op).toBe('gen_ai.messages');
  expect(transactionEvent.contexts?.trace?.origin).toBe('auto.ai.anthropic');
  expect(transactionEvent.contexts?.trace?.data).toMatchObject({
    'gen_ai.operation.name': 'messages',
    'gen_ai.system': 'anthropic',
    'gen_ai.request.model': 'claude-3-haiku-20240307',
    'gen_ai.request.temperature': 0.7,
    'gen_ai.response.model': 'claude-3-haiku-20240307',
    'gen_ai.response.id': 'msg_mock123',
    'gen_ai.usage.input_tokens': 10,
    'gen_ai.usage.output_tokens': 15,
  });
});
Lines changed: 118 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,118 @@
1+
// Mock Google GenAI client for browser testing.
// Covers models.generateContent / generateContentStream and chats.create.
export class MockGoogleGenAI {
  constructor(config) {
    this.apiKey = config.apiKey;

    // Small artificial delay so instrumented spans get a non-zero duration.
    const simulateLatency = () => new Promise(resolve => setTimeout(resolve, 10));

    // Factories return fresh objects per call so callers may safely mutate them.
    const makeUsage = () => ({
      promptTokenCount: 8,
      candidatesTokenCount: 12,
      totalTokenCount: 20,
    });

    const makeCandidate = text => ({
      content: {
        parts: [{ text }],
        role: 'model',
      },
      finishReason: 'stop',
      index: 0,
    });

    // models.generateContent functionality
    this.models = {
      generateContent: async (...args) => {
        const [params] = args;
        await simulateLatency();

        if (params.model === 'error-model') {
          const error = new Error('Model not found');
          error.status = 404;
          error.headers = { 'x-request-id': 'mock-request-123' };
          throw error;
        }

        return {
          candidates: [makeCandidate('Hello from Google GenAI mock!')],
          usageMetadata: makeUsage(),
        };
      },
      // Promise resolving to an async generator, matching the real SDK shape.
      generateContentStream: async () =>
        (async function* () {
          yield { candidates: [makeCandidate('Streaming response')] };
        })(),
    };

    // chats.create implementation
    this.chats = {
      create: (...args) => {
        const { model } = args[0];

        return {
          modelVersion: model,
          sendMessage: async (..._messageArgs) => {
            await simulateLatency();

            return {
              candidates: [makeCandidate('This is a joke from the chat!')],
              usageMetadata: makeUsage(),
              modelVersion: model, // Include model version in response
            };
          },
          // Promise resolving to an async generator, matching the real SDK shape.
          sendMessageStream: async () =>
            (async function* () {
              yield { candidates: [makeCandidate('Streaming chat response')] };
            })(),
        };
      },
    };
  }
}
118+
Lines changed: 38 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,38 @@
1+
import * as Sentry from '@sentry/browser';
import { MockGoogleGenAI } from './mocks.js';

// Expose the SDK on the window for the test harness.
window.Sentry = Sentry;

Sentry.init({
  dsn: 'https://[email protected]/1337',
  tracesSampleRate: 1,
});

const mockClient = new MockGoogleGenAI({
  apiKey: 'mock-api-key',
});

// Wrap the mock client so the manual AI instrumentation records spans.
const client = Sentry.instrumentGoogleGenAIClient(mockClient);

// Test that manual instrumentation doesn't crash the browser
// The instrumentation automatically creates spans
// Test both chats and models APIs
const chat = client.chats.create({
  model: 'gemini-1.5-pro',
  config: {
    temperature: 0.8,
    topP: 0.9,
    maxOutputTokens: 150,
  },
  history: [
    {
      role: 'user',
      parts: [{ text: 'Hello, how are you?' }],
    },
  ],
});

// Fix: terminate the statement with a semicolon instead of relying on ASI,
// matching the semicolon convention used everywhere else in this file.
chat.sendMessage({
  message: 'Tell me a joke',
});
38+
Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,34 @@
1+
import { expect } from '@playwright/test';
import { sentryTest } from '../../../../utils/fixtures';
import { envelopeRequestParser, shouldSkipTracingTest, waitForTransactionRequest } from '../../../../utils/helpers';

// These tests are not exhaustive: the instrumentation is already covered by
// the node integration tests. Here we only verify that it does not crash in
// the browser and that gen_ai transactions are sent.

sentryTest('manual Google GenAI instrumentation sends gen_ai transactions', async ({ getLocalTestUrl, page }) => {
  if (shouldSkipTracingTest()) {
    sentryTest.skip();
  }

  // Register the listener before navigating so the transaction is not missed.
  const pendingTransaction = waitForTransactionRequest(page, event =>
    Boolean(event.transaction?.includes('gemini-1.5-pro')),
  );

  const url = await getLocalTestUrl({ testDir: __dirname });
  await page.goto(url);

  const transactionEvent = envelopeRequestParser(await pendingTransaction);

  // Verify it's a gen_ai transaction
  expect(transactionEvent.transaction).toBe('chat gemini-1.5-pro create');
  expect(transactionEvent.contexts?.trace?.op).toBe('gen_ai.chat');
  expect(transactionEvent.contexts?.trace?.origin).toBe('auto.ai.google_genai');
  expect(transactionEvent.contexts?.trace?.data).toMatchObject({
    'gen_ai.operation.name': 'chat',
    'gen_ai.system': 'google_genai',
    'gen_ai.request.model': 'gemini-1.5-pro',
  });
});
Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,47 @@
1+
// Mock OpenAI client used by the browser integration tests.
// Implements only chat.completions.create, which is all the tests exercise.
export class MockOpenAi {
  constructor(config) {
    this.apiKey = config.apiKey;

    // Build a fresh canned chat.completion payload for the given model.
    const buildCompletion = model => ({
      id: 'chatcmpl-mock123',
      object: 'chat.completion',
      created: 1677652288,
      model,
      system_fingerprint: 'fp_44709d6fcb',
      choices: [
        {
          index: 0,
          message: {
            role: 'assistant',
            content: 'Hello from OpenAI mock!',
          },
          finish_reason: 'stop',
        },
      ],
      usage: {
        prompt_tokens: 10,
        completion_tokens: 15,
        total_tokens: 25,
      },
    });

    this.chat = {
      completions: {
        create: async (...args) => {
          const [params] = args;
          // Simulate processing time
          await new Promise(resolve => setTimeout(resolve, 10));

          if (params.model === 'error-model') {
            const error = new Error('Model not found');
            error.status = 404;
            error.headers = { 'x-request-id': 'mock-request-123' };
            throw error;
          }

          return buildCompletion(params.model);
        },
      },
    };
  }
}
47+
Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,28 @@
1+
import * as Sentry from '@sentry/browser';
import { MockOpenAi } from './mocks.js';

// Expose the SDK on the window for the test harness.
window.Sentry = Sentry;

Sentry.init({
  dsn: 'https://[email protected]/1337',
  tracesSampleRate: 1,
});

// Wrap the mock client so the manual AI instrumentation records spans.
const client = Sentry.instrumentOpenAiClient(new MockOpenAi({ apiKey: 'mock-api-key' }));

// Fire one request to verify the instrumentation does not crash the browser;
// the wrapper creates the gen_ai span automatically.
client.chat.completions.create({
  model: 'gpt-3.5-turbo',
  messages: [
    { role: 'system', content: 'You are a helpful assistant.' },
    { role: 'user', content: 'What is the capital of France?' },
  ],
  temperature: 0.7,
  max_tokens: 100,
});
28+

0 commit comments

Comments
 (0)