Skip to content

Commit 78d3d35

Browse files
committed
add tests
1 parent 4e48051 commit 78d3d35

File tree

4 files changed

+180
-10
lines changed

4 files changed

+180
-10
lines changed
Lines changed: 105 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,105 @@
1+
import { instrumentAnthropicAiClient } from '@sentry/core';
2+
import * as Sentry from '@sentry/node';
3+
4+
function createMockStreamEvents(model = 'claude-3-haiku-20240307') {
5+
async function* generator() {
6+
// Provide message metadata early so the span can capture id/model/usage input tokens
7+
yield {
8+
type: 'content_block_start',
9+
message: {
10+
id: 'msg_stream_1',
11+
type: 'message',
12+
role: 'assistant',
13+
model,
14+
content: [],
15+
stop_reason: 'end_turn',
16+
stop_sequence: null,
17+
usage: {
18+
input_tokens: 10,
19+
},
20+
},
21+
};
22+
23+
// Streamed text chunks
24+
yield { type: 'content_block_delta', delta: { text: 'Hello ' } };
25+
yield { type: 'content_block_delta', delta: { text: 'from ' } };
26+
yield { type: 'content_block_delta', delta: { text: 'stream!' } };
27+
28+
// Final usage totals for output tokens
29+
yield { type: 'message_delta', usage: { output_tokens: 15 } };
30+
}
31+
32+
return generator();
33+
}
34+
35+
/**
 * Minimal stand-in for the Anthropic SDK client. Exposes only the surface
 * exercised by this scenario: `messages.create` and `messages.stream`.
 */
class MockAnthropic {
  constructor(config) {
    this.apiKey = config.apiKey;

    this.messages = {
      create: this._messagesCreate.bind(this),
      stream: this._messagesStream.bind(this),
    };
  }

  /** Simulates a small network round-trip delay. */
  _delay() {
    return new Promise(resolve => setTimeout(resolve, 5));
  }

  /** Mirrors the real SDK: `stream: true` returns an event stream instead of a message. */
  async _messagesCreate(params) {
    await this._delay();

    if (params?.stream === true) {
      return createMockStreamEvents(params.model);
    }

    // Fallback non-streaming behavior (not used in this scenario)
    return {
      id: 'msg_mock123',
      type: 'message',
      model: params.model,
      role: 'assistant',
      content: [
        {
          type: 'text',
          text: 'Hello from Anthropic mock!',
        },
      ],
      stop_reason: 'end_turn',
      stop_sequence: null,
      usage: {
        input_tokens: 10,
        output_tokens: 15,
      },
    };
  }

  /** Dedicated streaming entry point; always yields mock stream events. */
  async _messagesStream(params) {
    await this._delay();
    return createMockStreamEvents(params?.model);
  }
}
76+
77+
/**
 * Exercises both streaming entry points of the instrumented client inside a
 * root span, draining each stream so the instrumentation can finalize its
 * span attributes.
 */
async function run() {
  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
    const client = instrumentAnthropicAiClient(new MockAnthropic({ apiKey: 'mock-api-key' }));

    // 1) Streaming via stream: true param on messages.create
    const createStream = await client.messages.create({
      model: 'claude-3-haiku-20240307',
      messages: [{ role: 'user', content: 'Stream this please' }],
      stream: true,
    });
    for await (const event of createStream) {
      void event;
    }

    // 2) Streaming via messages.stream API
    const apiStream = await client.messages.stream({
      model: 'claude-3-haiku-20240307',
      messages: [{ role: 'user', content: 'Stream this too' }],
    });
    for await (const event of apiStream) {
      void event;
    }
  });
}

run();

dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts

Lines changed: 74 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -219,4 +219,78 @@ describe('Anthropic integration', () => {
219219
.completed();
220220
});
221221
});
222+
223+
// Expected spans with sendDefaultPii disabled: token usage and response
// metadata are captured, but no response text attribute is present.
const EXPECTED_STREAM_SPANS_PII_FALSE = {
  transaction: 'main',
  spans: expect.arrayContaining([
    // messages.create with stream: true
    expect.objectContaining({
      description: 'messages.create claude-3-haiku-20240307 stream-response',
      op: 'gen_ai.messages.create',
      data: expect.objectContaining({
        'gen_ai.system': 'anthropic',
        'gen_ai.operation.name': 'messages.create',
        'gen_ai.request.model': 'claude-3-haiku-20240307',
        'gen_ai.request.stream': true,
        'gen_ai.response.streaming': true,
        'gen_ai.response.model': 'claude-3-haiku-20240307',
        'gen_ai.response.id': 'msg_stream_1',
        // Totals match the mock stream: 10 input + 15 output tokens.
        'gen_ai.usage.input_tokens': 10,
        'gen_ai.usage.output_tokens': 15,
        'gen_ai.usage.total_tokens': 25,
      }),
    }),
    // messages.stream
    expect.objectContaining({
      description: 'stream claude-3-haiku-20240307 stream-response',
      op: 'gen_ai.stream',
      data: expect.objectContaining({
        'gen_ai.system': 'anthropic',
        'gen_ai.operation.name': 'stream',
        'gen_ai.request.model': 'claude-3-haiku-20240307',
        'gen_ai.response.streaming': true,
        'gen_ai.response.model': 'claude-3-haiku-20240307',
        'gen_ai.response.id': 'msg_stream_1',
        'gen_ai.usage.input_tokens': 10,
        'gen_ai.usage.output_tokens': 15,
        'gen_ai.usage.total_tokens': 25,
      }),
    }),
  ]),
};
261+
262+
// Expected spans with sendDefaultPii enabled: in addition to the metadata
// above, the concatenated streamed text is recorded on each span.
const EXPECTED_STREAM_SPANS_PII_TRUE = {
  transaction: 'main',
  spans: expect.arrayContaining([
    expect.objectContaining({
      description: 'messages.create claude-3-haiku-20240307 stream-response',
      op: 'gen_ai.messages.create',
      data: expect.objectContaining({
        'gen_ai.response.streaming': true,
        // streamed text concatenated
        'gen_ai.response.text': 'Hello from stream!',
      }),
    }),
    expect.objectContaining({
      description: 'stream claude-3-haiku-20240307 stream-response',
      op: 'gen_ai.stream',
      data: expect.objectContaining({
        'gen_ai.response.streaming': true,
        'gen_ai.response.text': 'Hello from stream!',
      }),
    }),
  ]),
};
284+
285+
// Runs the streaming scenario under both ESM and CJS with the default
// (PII-off) instrumentation setup.
createEsmAndCjsTests(__dirname, 'scenario-stream.mjs', 'instrument.mjs', (createRunner, test) => {
  test('streams produce spans with token usage and metadata (PII false)', async () => {
    await createRunner().ignore('event').expect({ transaction: EXPECTED_STREAM_SPANS_PII_FALSE }).start().completed();
  });
});
290+
291+
// Same scenario with sendDefaultPii enabled: response text must be recorded.
createEsmAndCjsTests(__dirname, 'scenario-stream.mjs', 'instrument-with-pii.mjs', (createRunner, test) => {
  test('streams record response text when PII true', async () => {
    await createRunner().ignore('event').expect({ transaction: EXPECTED_STREAM_SPANS_PII_TRUE }).start().completed();
  });
});
222296
});

packages/core/src/utils/anthropic-ai/index.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -191,7 +191,7 @@ function instrumentMethod<T extends unknown[], R>(
191191

192192
const result = await originalMethod.apply(context, args);
193193
return instrumentStream(
194-
result as unknown as AsyncIterable<AnthropicAiStreamingEvent>,
194+
result as AsyncIterable<AnthropicAiStreamingEvent>,
195195
span,
196196
finalOptions.recordOutputs ?? false,
197197
) as unknown as R;

packages/core/src/utils/anthropic-ai/streaming.ts

Lines changed: 0 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,6 @@ import { captureException } from '../../exports';
22
import { SPAN_STATUS_ERROR } from '../../tracing';
33
import type { Span } from '../../types-hoist/span';
44
import {
5-
ANTHROPIC_AI_RESPONSE_TIMESTAMP_ATTRIBUTE,
65
GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE,
76
GEN_AI_RESPONSE_ID_ATTRIBUTE,
87
GEN_AI_RESPONSE_MODEL_ATTRIBUTE,
@@ -27,8 +26,6 @@ interface StreamingState {
2726
responseId: string;
2827
/** The model name. */
2928
responseModel: string;
30-
/** The timestamp of the response. */
31-
responseTimestamp: number;
3229
/** Number of prompt/input tokens used. */
3330
promptTokens: number | undefined;
3431
/** Number of completion/output tokens used. */
@@ -158,7 +155,6 @@ export async function* instrumentStream(
158155
finishReasons: [],
159156
responseId: '',
160157
responseModel: '',
161-
responseTimestamp: 0,
162158
promptTokens: undefined,
163159
completionTokens: undefined,
164160
cacheCreationInputTokens: undefined,
@@ -182,11 +178,6 @@ export async function* instrumentStream(
182178
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: state.responseModel,
183179
});
184180
}
185-
if (state.responseTimestamp) {
186-
span.setAttributes({
187-
[ANTHROPIC_AI_RESPONSE_TIMESTAMP_ATTRIBUTE]: new Date(state.responseTimestamp * 1000).toISOString(),
188-
});
189-
}
190181

191182
setTokenUsageAttributes(
192183
span,

0 commit comments

Comments
 (0)