Skip to content

Commit 7feef93

Browse files
committed
update op name
1 parent 17a2f1e commit 7feef93

File tree

11 files changed

+436
-11
lines changed

11 files changed

+436
-11
lines changed

dev-packages/e2e-tests/test-applications/nextjs-15/app/ai-error-test/page.tsx

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,4 @@
1-
import ai from 'ai';
2-
ai.generateText
1+
import { generateText } from 'ai';
32
import { MockLanguageModelV1 } from 'ai/test';
43
import { z } from 'zod';
54
import * as Sentry from '@sentry/nextjs';
Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,18 @@
1+
import * as Sentry from '@sentry/node';
import { nodeContextIntegration } from '@sentry/node-core';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

// Sentry setup for the "custom options" scenario: PII capture is disabled
// globally (sendDefaultPii: false), but the Anthropic integration is told to
// capture prompts and responses explicitly via recordInputs/recordOutputs.
const sentryOptions = {
  // NOTE(review): DSN looks email-obfuscated by the page scrape ([email protected]) — confirm the real test DSN.
  dsn: 'https://[email protected]/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: false,
  transport: loggingTransport,
  integrations: [
    Sentry.anthropicAIIntegration({ recordInputs: true, recordOutputs: true }),
    nodeContextIntegration(),
  ],
};

Sentry.init(sentryOptions);
Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,15 @@
1+
import * as Sentry from '@sentry/node';
import { nodeContextIntegration } from '@sentry/node-core';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

// Sentry setup for the "default PII" scenario: sendDefaultPii is switched on
// and the Anthropic integration is added without any per-integration options,
// so input/output recording follows the global PII setting.
const sentryOptions = {
  // NOTE(review): DSN looks email-obfuscated by the page scrape ([email protected]) — confirm the real test DSN.
  dsn: 'https://[email protected]/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: true,
  transport: loggingTransport,
  integrations: [Sentry.anthropicAIIntegration(), nodeContextIntegration()],
};

Sentry.init(sentryOptions);
Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,16 @@
1+
import * as Sentry from '@sentry/node';
import { nodeContextIntegration } from '@sentry/node-core';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

// Sentry setup for the default scenario: PII capture disabled and the
// Anthropic integration force-included (it would not be auto-enabled here),
// so spans are created but prompts/responses are NOT recorded.
const sentryOptions = {
  // NOTE(review): DSN looks email-obfuscated by the page scrape ([email protected]) — confirm the real test DSN.
  dsn: 'https://[email protected]/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: false,
  transport: loggingTransport,
  integrations: [Sentry.anthropicAIIntegration(), nodeContextIntegration()],
};

Sentry.init(sentryOptions);
Lines changed: 119 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,119 @@
1+
import { instrumentAnthropicAiClient } from '@sentry/core';
2+
import * as Sentry from '@sentry/node';
3+
4+
/**
 * Minimal stand-in for the Anthropic SDK client used by the integration test.
 * It exposes the same surface the instrumentation wraps:
 *   - messages.create(params)      -> canned message, or a thrown 404 for 'error-model'
 *   - messages.countTokens()       -> canned token count
 *   - models.retrieve(modelId)     -> canned model descriptor
 * Every call waits ~10ms to mimic network latency.
 */
class MockAnthropic {
  constructor(config) {
    this.apiKey = config.apiKey;

    // Expose bound entry points the way the real SDK does (client.messages.*,
    // client.models.*); arrow wrappers keep `this` pointing at the instance.
    this.messages = {
      create: params => this._createMessage(params),
      countTokens: () => this._countTokens(),
    };

    this.models = {
      retrieve: modelId => this._retrieveModel(modelId),
    };
  }

  // Simulated network latency shared by all mock endpoints.
  static #delay() {
    return new Promise(resolve => setTimeout(resolve, 10));
  }

  /**
   * Produce a canned assistant message, or throw a 404-style error when the
   * caller asks for the special 'error-model' (used by the error-path test).
   */
  async _createMessage(params) {
    await MockAnthropic.#delay();

    if (params.model === 'error-model') {
      const notFound = new Error('Model not found');
      notFound.status = 404;
      notFound.headers = { 'x-request-id': 'mock-request-123' };
      throw notFound;
    }

    return {
      id: 'msg_mock123',
      type: 'message',
      model: params.model,
      role: 'assistant',
      content: [
        {
          type: 'text',
          text: 'Hello from Anthropic mock!',
        },
      ],
      stop_reason: 'end_turn',
      stop_sequence: null,
      usage: {
        input_tokens: 10,
        output_tokens: 15,
      },
    };
  }

  /** Canned token count; countTokens responses only carry input_tokens. */
  async _countTokens() {
    await MockAnthropic.#delay();
    return {
      input_tokens: 15,
    };
  }

  /** Canned model descriptor matching what the real models.retrieve returns. */
  async _retrieveModel(modelId) {
    await MockAnthropic.#delay();
    return {
      id: modelId,
      name: modelId,
      created_at: 1715145600,
      // `model` mirrors the check in the instrumentation's addResponseAttributes.
      model: modelId,
    };
  }
}
76+
77+
/**
 * Drives the instrumented mock client through the four operations the
 * integration test asserts on, all inside a root 'main' span:
 *   1. a successful messages.create,
 *   2. a messages.create that throws (error-model),
 *   3. messages.countTokens,
 *   4. models.retrieve.
 */
async function run() {
  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
    const anthropic = new MockAnthropic({
      apiKey: 'mock-api-key',
    });
    const client = instrumentAnthropicAiClient(anthropic);

    // 1) Basic message completion.
    await client.messages.create({
      model: 'claude-3-haiku-20240307',
      system: 'You are a helpful assistant.',
      messages: [
        { role: 'user', content: 'What is the capital of France?' },
      ],
      temperature: 0.7,
      max_tokens: 100,
    });

    // 2) Error handling — the mock throws for 'error-model'; the error is
    //    expected, so it is swallowed here (the span still records the failure).
    try {
      await client.messages.create({
        model: 'error-model',
        messages: [{ role: 'user', content: 'This will fail' }],
      });
    } catch {
      // Expected failure; nothing to do.
    }

    // 3) Token counting.
    await client.messages.countTokens({
      model: 'claude-3-haiku-20240307',
      messages: [
        { role: 'user', content: 'What is the capital of France?' },
      ],
    });

    // 4) Model retrieval.
    await client.models.retrieve('claude-3-haiku-20240307');
  });
}

run();
Lines changed: 220 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,220 @@
1+
// Integration tests for the Sentry Anthropic AI instrumentation.
// Each scenario runs `scenario.mjs` (a mocked Anthropic client wrapped by the
// instrumentation) under a different instrument file, then asserts on the
// gen_ai.* spans reported in the resulting 'main' transaction.
import { afterAll, describe, expect } from 'vitest';
import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../utils/runner';

describe('Anthropic integration', () => {
  afterAll(() => {
    cleanupChildProcesses();
  });

  // Expected spans with sendDefaultPii: false and no integration options:
  // no 'gen_ai.request.messages' and no 'gen_ai.response.text' attributes
  // appear on any span.
  const EXPECTED_TRANSACTION_DEFAULT_PII_FALSE = {
    transaction: 'main',
    spans: expect.arrayContaining([
      // First span - basic message completion without PII
      expect.objectContaining({
        data: {
          'gen_ai.operation.name': 'messages.create',
          'sentry.op': 'gen_ai.messages.create',
          'sentry.origin': 'auto.ai.anthropic',
          'gen_ai.system': 'anthropic',
          'gen_ai.request.model': 'claude-3-haiku-20240307',
          'gen_ai.request.temperature': 0.7,
          'gen_ai.request.max_tokens': 100,
          'gen_ai.response.model': 'claude-3-haiku-20240307',
          'gen_ai.response.id': 'msg_mock123',
          'gen_ai.usage.input_tokens': 10,
          'gen_ai.usage.output_tokens': 15,
          'gen_ai.usage.total_tokens': 25,
        },
        description: 'messages.create claude-3-haiku-20240307',
        op: 'gen_ai.messages.create',
        origin: 'auto.ai.anthropic',
        status: 'ok',
      }),
      // Second span - error handling
      expect.objectContaining({
        data: {
          'gen_ai.operation.name': 'messages.create',
          'sentry.op': 'gen_ai.messages.create',
          'sentry.origin': 'auto.ai.anthropic',
          'gen_ai.system': 'anthropic',
          'gen_ai.request.model': 'error-model',
        },
        description: 'messages.create error-model',
        op: 'gen_ai.messages.create',
        origin: 'auto.ai.anthropic',
        status: 'unknown_error',
      }),
      // Third span - token counting (no response.text because recordOutputs=false by default)
      expect.objectContaining({
        data: {
          'gen_ai.operation.name': 'messages.countTokens',
          'sentry.op': 'gen_ai.messages.countTokens',
          'sentry.origin': 'auto.ai.anthropic',
          'gen_ai.system': 'anthropic',
          'gen_ai.request.model': 'claude-3-haiku-20240307',
        },
        description: 'messages.countTokens claude-3-haiku-20240307',
        op: 'gen_ai.messages.countTokens',
        origin: 'auto.ai.anthropic',
        status: 'ok',
      }),
      // Fourth span - models.retrieve
      expect.objectContaining({
        data: {
          'gen_ai.operation.name': 'retrieve',
          'sentry.op': 'gen_ai.retrieve',
          'sentry.origin': 'auto.ai.anthropic',
          'gen_ai.system': 'anthropic',
          'gen_ai.request.model': 'claude-3-haiku-20240307',
          'gen_ai.response.id': 'claude-3-haiku-20240307',
          'gen_ai.response.model': 'claude-3-haiku-20240307',
        },
        description: 'retrieve claude-3-haiku-20240307',
        op: 'gen_ai.retrieve',
        origin: 'auto.ai.anthropic',
        status: 'ok',
      }),
    ]),
  };

  // Expected spans with sendDefaultPii: true: request messages and response
  // text are additionally captured on the relevant spans.
  const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE = {
    transaction: 'main',
    spans: expect.arrayContaining([
      // First span - basic message completion with PII
      expect.objectContaining({
        data: {
          'gen_ai.operation.name': 'messages.create',
          'sentry.op': 'gen_ai.messages.create',
          'sentry.origin': 'auto.ai.anthropic',
          'gen_ai.system': 'anthropic',
          'gen_ai.request.model': 'claude-3-haiku-20240307',
          'gen_ai.request.temperature': 0.7,
          'gen_ai.request.max_tokens': 100,
          'gen_ai.request.messages': '[{"role":"user","content":"What is the capital of France?"}]',
          'gen_ai.response.model': 'claude-3-haiku-20240307',
          'gen_ai.response.id': 'msg_mock123',
          'gen_ai.response.text': 'Hello from Anthropic mock!',
          'gen_ai.usage.input_tokens': 10,
          'gen_ai.usage.output_tokens': 15,
          'gen_ai.usage.total_tokens': 25,
        },
        description: 'messages.create claude-3-haiku-20240307',
        op: 'gen_ai.messages.create',
        origin: 'auto.ai.anthropic',
        status: 'ok',
      }),
      // Second span - error handling with PII
      expect.objectContaining({
        data: {
          'gen_ai.operation.name': 'messages.create',
          'sentry.op': 'gen_ai.messages.create',
          'sentry.origin': 'auto.ai.anthropic',
          'gen_ai.system': 'anthropic',
          'gen_ai.request.model': 'error-model',
          'gen_ai.request.messages': '[{"role":"user","content":"This will fail"}]',
        },
        description: 'messages.create error-model',
        op: 'gen_ai.messages.create',
        origin: 'auto.ai.anthropic',
        status: 'unknown_error',
      }),
      // Third span - token counting with PII (response.text is present because sendDefaultPii=true enables recordOutputs)
      expect.objectContaining({
        data: {
          'gen_ai.operation.name': 'messages.countTokens',
          'sentry.op': 'gen_ai.messages.countTokens',
          'sentry.origin': 'auto.ai.anthropic',
          'gen_ai.system': 'anthropic',
          'gen_ai.request.model': 'claude-3-haiku-20240307',
          'gen_ai.request.messages': '[{"role":"user","content":"What is the capital of France?"}]',
          'gen_ai.response.text': '15', // Only present because recordOutputs=true when sendDefaultPii=true
        },
        description: 'messages.countTokens claude-3-haiku-20240307',
        op: 'gen_ai.messages.countTokens',
        origin: 'auto.ai.anthropic',
        status: 'ok',
      }),
      // Fourth span - models.retrieve with PII
      expect.objectContaining({
        data: {
          'gen_ai.operation.name': 'retrieve',
          'sentry.op': 'gen_ai.retrieve',
          'sentry.origin': 'auto.ai.anthropic',
          'gen_ai.system': 'anthropic',
          'gen_ai.request.model': 'claude-3-haiku-20240307',
          'gen_ai.response.id': 'claude-3-haiku-20240307',
          'gen_ai.response.model': 'claude-3-haiku-20240307',
        },
        description: 'retrieve claude-3-haiku-20240307',
        op: 'gen_ai.retrieve',
        origin: 'auto.ai.anthropic',
        status: 'ok',
      }),
    ]),
  };

  // Expected spans when recordInputs/recordOutputs are set explicitly on the
  // integration even though sendDefaultPii is false (instrument-with-options).
  const EXPECTED_TRANSACTION_WITH_OPTIONS = {
    transaction: 'main',
    spans: expect.arrayContaining([
      // Check that custom options are respected
      expect.objectContaining({
        data: expect.objectContaining({
          'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true
          'gen_ai.response.text': expect.any(String), // Should include response text when recordOutputs: true
        }),
      }),
      // Check token counting with options
      expect.objectContaining({
        data: expect.objectContaining({
          'gen_ai.operation.name': 'messages.countTokens',
          'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true
          'gen_ai.response.text': '15', // Present because recordOutputs=true is set in options
        }),
        op: 'gen_ai.messages.countTokens',
      }),
      // Check models.retrieve with options
      expect.objectContaining({
        data: expect.objectContaining({
          'gen_ai.operation.name': 'retrieve',
          'gen_ai.system': 'anthropic',
          'gen_ai.request.model': 'claude-3-haiku-20240307',
          'gen_ai.response.id': 'claude-3-haiku-20240307',
          'gen_ai.response.model': 'claude-3-haiku-20240307',
        }),
        op: 'gen_ai.retrieve',
        description: 'retrieve claude-3-haiku-20240307',
      }),
    ]),
  };

  // Each scenario is executed in both ESM and CJS child processes; 'event'
  // envelopes are ignored so only the transaction is asserted on.
  createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument.mjs', (createRunner, test) => {
    test('creates anthropic related spans with sendDefaultPii: false', async () => {
      await createRunner()
        .ignore('event')
        .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_FALSE })
        .start()
        .completed();
    });
  });

  createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument-with-pii.mjs', (createRunner, test) => {
    test('creates anthropic related spans with sendDefaultPii: true', async () => {
      await createRunner()
        .ignore('event')
        .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_TRUE })
        .start()
        .completed();
    });
  });

  createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument-with-options.mjs', (createRunner, test) => {
    test('creates anthropic related spans with custom options', async () => {
      await createRunner()
        .ignore('event')
        .expect({ transaction: EXPECTED_TRANSACTION_WITH_OPTIONS })
        .start()
        .completed();
    });
  });
});

0 commit comments

Comments
 (0)