
Commit a808943: add tests

1 parent 740ac92 commit a808943

4 files changed, +180 -10 lines changed
dev-packages/node-integration-tests/suites/tracing/anthropic/scenario-stream.mjs

Lines changed: 105 additions & 0 deletions
@@ -0,0 +1,105 @@
import { instrumentAnthropicAiClient } from '@sentry/core';
import * as Sentry from '@sentry/node';

function createMockStreamEvents(model = 'claude-3-haiku-20240307') {
  async function* generator() {
    // Provide message metadata early so the span can capture id/model/usage input tokens
    yield {
      type: 'content_block_start',
      message: {
        id: 'msg_stream_1',
        type: 'message',
        role: 'assistant',
        model,
        content: [],
        stop_reason: 'end_turn',
        stop_sequence: null,
        usage: {
          input_tokens: 10,
        },
      },
    };

    // Streamed text chunks
    yield { type: 'content_block_delta', delta: { text: 'Hello ' } };
    yield { type: 'content_block_delta', delta: { text: 'from ' } };
    yield { type: 'content_block_delta', delta: { text: 'stream!' } };

    // Final usage totals for output tokens
    yield { type: 'message_delta', usage: { output_tokens: 15 } };
  }

  return generator();
}

class MockAnthropic {
  constructor(config) {
    this.apiKey = config.apiKey;

    this.messages = {
      create: this._messagesCreate.bind(this),
      stream: this._messagesStream.bind(this),
    };
  }

  async _messagesCreate(params) {
    await new Promise(resolve => setTimeout(resolve, 5));
    if (params?.stream === true) {
      return createMockStreamEvents(params.model);
    }
    // Fallback non-streaming behavior (not used in this scenario)
    return {
      id: 'msg_mock123',
      type: 'message',
      model: params.model,
      role: 'assistant',
      content: [
        {
          type: 'text',
          text: 'Hello from Anthropic mock!',
        },
      ],
      stop_reason: 'end_turn',
      stop_sequence: null,
      usage: {
        input_tokens: 10,
        output_tokens: 15,
      },
    };
  }

  async _messagesStream(params) {
    await new Promise(resolve => setTimeout(resolve, 5));
    return createMockStreamEvents(params?.model);
  }
}

async function run() {
  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
    const mockClient = new MockAnthropic({ apiKey: 'mock-api-key' });
    const client = instrumentAnthropicAiClient(mockClient);

    // 1) Streaming via stream: true param on messages.create
    const stream1 = await client.messages.create({
      model: 'claude-3-haiku-20240307',
      messages: [{ role: 'user', content: 'Stream this please' }],
      stream: true,
    });
    for await (const _ of stream1) {
      void _;
    }

    // 2) Streaming via messages.stream API
    const stream2 = await client.messages.stream({
      model: 'claude-3-haiku-20240307',
      messages: [{ role: 'user', content: 'Stream this too' }],
    });
    for await (const _ of stream2) {
      void _;
    }
  });
}

run();
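For orientation, here is a minimal sketch of how an instrumented consumer would fold these mock events into the values the tests below assert: the id, model, and input tokens from content_block_start, the output tokens from message_delta, and the concatenated text from the content_block_delta chunks. The helper name and shape are illustrative, not the actual streaming.ts internals.

// Illustrative only: mirrors the accumulation the test assertions expect.
async function summarizeStream(events) {
  const summary = { id: '', model: '', inputTokens: 0, outputTokens: 0, text: '' };
  for await (const event of events) {
    if (event.type === 'content_block_start' && event.message) {
      summary.id = event.message.id;
      summary.model = event.message.model;
      summary.inputTokens = event.message.usage?.input_tokens ?? 0;
    } else if (event.type === 'content_block_delta' && event.delta?.text) {
      summary.text += event.delta.text; // 'Hello ' + 'from ' + 'stream!' = 'Hello from stream!'
    } else if (event.type === 'message_delta' && event.usage) {
      summary.outputTokens = event.usage.output_tokens ?? 0;
    }
  }
  // Total tokens: 10 input + 15 output = 25, matching the expected span data below.
  return { ...summary, totalTokens: summary.inputTokens + summary.outputTokens };
}

// e.g. summarizeStream(createMockStreamEvents()) resolves to:
// { id: 'msg_stream_1', model: 'claude-3-haiku-20240307',
//   inputTokens: 10, outputTokens: 15, text: 'Hello from stream!', totalTokens: 25 }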

dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts

Lines changed: 74 additions & 0 deletions
@@ -218,4 +218,78 @@ describe('Anthropic integration', () => {
         .completed();
     });
   });
+
+  const EXPECTED_STREAM_SPANS_PII_FALSE = {
+    transaction: 'main',
+    spans: expect.arrayContaining([
+      // messages.create with stream: true
+      expect.objectContaining({
+        description: 'messages.create claude-3-haiku-20240307 stream-response',
+        op: 'gen_ai.messages.create',
+        data: expect.objectContaining({
+          'gen_ai.system': 'anthropic',
+          'gen_ai.operation.name': 'messages.create',
+          'gen_ai.request.model': 'claude-3-haiku-20240307',
+          'gen_ai.request.stream': true,
+          'gen_ai.response.streaming': true,
+          'gen_ai.response.model': 'claude-3-haiku-20240307',
+          'gen_ai.response.id': 'msg_stream_1',
+          'gen_ai.usage.input_tokens': 10,
+          'gen_ai.usage.output_tokens': 15,
+          'gen_ai.usage.total_tokens': 25,
+        }),
+      }),
+      // messages.stream
+      expect.objectContaining({
+        description: 'stream claude-3-haiku-20240307 stream-response',
+        op: 'gen_ai.stream',
+        data: expect.objectContaining({
+          'gen_ai.system': 'anthropic',
+          'gen_ai.operation.name': 'stream',
+          'gen_ai.request.model': 'claude-3-haiku-20240307',
+          'gen_ai.response.streaming': true,
+          'gen_ai.response.model': 'claude-3-haiku-20240307',
+          'gen_ai.response.id': 'msg_stream_1',
+          'gen_ai.usage.input_tokens': 10,
+          'gen_ai.usage.output_tokens': 15,
+          'gen_ai.usage.total_tokens': 25,
+        }),
+      }),
+    ]),
+  };
+
+  const EXPECTED_STREAM_SPANS_PII_TRUE = {
+    transaction: 'main',
+    spans: expect.arrayContaining([
+      expect.objectContaining({
+        description: 'messages.create claude-3-haiku-20240307 stream-response',
+        op: 'gen_ai.messages.create',
+        data: expect.objectContaining({
+          'gen_ai.response.streaming': true,
+          // streamed text concatenated
+          'gen_ai.response.text': 'Hello from stream!',
+        }),
+      }),
+      expect.objectContaining({
+        description: 'stream claude-3-haiku-20240307 stream-response',
+        op: 'gen_ai.stream',
+        data: expect.objectContaining({
+          'gen_ai.response.streaming': true,
+          'gen_ai.response.text': 'Hello from stream!',
+        }),
+      }),
+    ]),
+  };
+
+  createEsmAndCjsTests(__dirname, 'scenario-stream.mjs', 'instrument.mjs', (createRunner, test) => {
+    test('streams produce spans with token usage and metadata (PII false)', async () => {
+      await createRunner().ignore('event').expect({ transaction: EXPECTED_STREAM_SPANS_PII_FALSE }).start().completed();
+    });
+  });
+
+  createEsmAndCjsTests(__dirname, 'scenario-stream.mjs', 'instrument-with-pii.mjs', (createRunner, test) => {
+    test('streams record response text when PII true', async () => {
+      await createRunner().ignore('event').expect({ transaction: EXPECTED_STREAM_SPANS_PII_TRUE }).start().completed();
+    });
+  });
 });
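Neither instrument.mjs nor instrument-with-pii.mjs is part of this commit. Presumably the PII variant differs only by enabling sendDefaultPii, which is what the test names suggest gates recording of response text. A hypothetical sketch, with a placeholder DSN:

// Hypothetical instrument-with-pii.mjs; the real file is not shown in this diff.
import * as Sentry from '@sentry/node';

Sentry.init({
  dsn: 'https://public@dsn.ingest.sentry.io/1337', // placeholder DSN
  tracesSampleRate: 1.0,
  sendDefaultPii: true, // presumably lets the instrumentation record gen_ai.response.text
});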

packages/core/src/utils/anthropic-ai/index.ts

Lines changed: 1 addition & 1 deletion
@@ -191,7 +191,7 @@ function instrumentMethod<T extends unknown[], R>(
 
       const result = await originalMethod.apply(context, args);
       return instrumentStream(
-        result as unknown as AsyncIterable<AnthropicAiStreamingEvent>,
+        result as AsyncIterable<AnthropicAiStreamingEvent>,
         span,
         finalOptions.recordOutputs ?? false,
       ) as unknown as R;
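A small type-level cleanup: the intermediate hop through unknown on the input cast is dropped, presumably because the awaited result type already permits a direct assertion to AsyncIterable<AnthropicAiStreamingEvent>. The return value keeps as unknown as R, since the wrapped generator type and the generic R remain unrelated.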

packages/core/src/utils/anthropic-ai/streaming.ts

Lines changed: 0 additions & 9 deletions
@@ -2,7 +2,6 @@ import { captureException } from '../../exports';
 import { SPAN_STATUS_ERROR } from '../../tracing';
 import type { Span } from '../../types-hoist/span';
 import {
-  ANTHROPIC_AI_RESPONSE_TIMESTAMP_ATTRIBUTE,
   GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE,
   GEN_AI_RESPONSE_ID_ATTRIBUTE,
   GEN_AI_RESPONSE_MODEL_ATTRIBUTE,
@@ -27,8 +26,6 @@ interface StreamingState {
   responseId: string;
   /** The model name. */
   responseModel: string;
-  /** The timestamp of the response. */
-  responseTimestamp: number;
   /** Number of prompt/input tokens used. */
   promptTokens: number | undefined;
   /** Number of completion/output tokens used. */
@@ -158,7 +155,6 @@ export async function* instrumentStream(
     finishReasons: [],
     responseId: '',
     responseModel: '',
-    responseTimestamp: 0,
     promptTokens: undefined,
     completionTokens: undefined,
     cacheCreationInputTokens: undefined,
@@ -182,11 +178,6 @@ export async function* instrumentStream(
       [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: state.responseModel,
     });
   }
-  if (state.responseTimestamp) {
-    span.setAttributes({
-      [ANTHROPIC_AI_RESPONSE_TIMESTAMP_ATTRIBUTE]: new Date(state.responseTimestamp * 1000).toISOString(),
-    });
-  }
 
   setTokenUsageAttributes(
     span,
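This removes the response-timestamp plumbing from the streaming state entirely, presumably because Anthropic stream message payloads carry no timestamp field (the mock's content_block_start message above has id, model, and usage but nothing timestamp-like), so state.responseTimestamp could only ever hold its initial 0 and the attribute was never emitted.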
