
Commit b672f40

add tests
1 parent 740ac92 commit b672f40

File tree

4 files changed, +182 -17 lines

dev-packages/node-integration-tests/suites/tracing/anthropic/scenario-stream.mjs
dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts
packages/core/src/utils/anthropic-ai/index.ts
packages/core/src/utils/anthropic-ai/streaming.ts

dev-packages/node-integration-tests/suites/tracing/anthropic/scenario-stream.mjs

Lines changed: 105 additions & 0 deletions

@@ -0,0 +1,105 @@
import { instrumentAnthropicAiClient } from '@sentry/core';
import * as Sentry from '@sentry/node';

function createMockStreamEvents(model = 'claude-3-haiku-20240307') {
  async function* generator() {
    // Provide message metadata early so the span can capture id/model/usage input tokens
    yield {
      type: 'content_block_start',
      message: {
        id: 'msg_stream_1',
        type: 'message',
        role: 'assistant',
        model,
        content: [],
        stop_reason: 'end_turn',
        stop_sequence: null,
        usage: {
          input_tokens: 10,
        },
      },
    };

    // Streamed text chunks
    yield { type: 'content_block_delta', delta: { text: 'Hello ' } };
    yield { type: 'content_block_delta', delta: { text: 'from ' } };
    yield { type: 'content_block_delta', delta: { text: 'stream!' } };

    // Final usage totals for output tokens
    yield { type: 'message_delta', usage: { output_tokens: 15 } };
  }

  return generator();
}

class MockAnthropic {
  constructor(config) {
    this.apiKey = config.apiKey;

    this.messages = {
      create: this._messagesCreate.bind(this),
      stream: this._messagesStream.bind(this),
    };
  }

  async _messagesCreate(params) {
    await new Promise(resolve => setTimeout(resolve, 5));
    if (params?.stream === true) {
      return createMockStreamEvents(params.model);
    }
    // Fallback non-streaming behavior (not used in this scenario)
    return {
      id: 'msg_mock123',
      type: 'message',
      model: params.model,
      role: 'assistant',
      content: [
        {
          type: 'text',
          text: 'Hello from Anthropic mock!',
        },
      ],
      stop_reason: 'end_turn',
      stop_sequence: null,
      usage: {
        input_tokens: 10,
        output_tokens: 15,
      },
    };
  }

  async _messagesStream(params) {
    await new Promise(resolve => setTimeout(resolve, 5));
    return createMockStreamEvents(params?.model);
  }
}

async function run() {
  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
    const mockClient = new MockAnthropic({ apiKey: 'mock-api-key' });
    const client = instrumentAnthropicAiClient(mockClient);

    // 1) Streaming via stream: true param on messages.create
    const stream1 = await client.messages.create({
      model: 'claude-3-haiku-20240307',
      messages: [{ role: 'user', content: 'Stream this please' }],
      stream: true,
    });
    for await (const _ of stream1) {
      void _;
    }

    // 2) Streaming via messages.stream API
    const stream2 = await client.messages.stream({
      model: 'claude-3-haiku-20240307',
      messages: [{ role: 'user', content: 'Stream this too' }],
    });
    for await (const _ of stream2) {
      void _;
    }
  });
}

run();
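The mock above emits only the event subset the instrumentation consumes: message metadata on content_block_start, text fragments on content_block_delta, and final output-token usage on message_delta. As a quick standalone sanity check (a sketch, not part of the commit), draining the generator directly should reproduce exactly the values the tests assert:

// Sketch: assumes module (top-level await) context alongside the mock above.
const events = createMockStreamEvents();
let text = '';
let inputTokens = 0;
let outputTokens = 0;
for await (const event of events) {
  if (event.type === 'content_block_start') inputTokens = event.message.usage.input_tokens;
  if (event.type === 'content_block_delta') text += event.delta.text;
  if (event.type === 'message_delta') outputTokens = event.usage.output_tokens;
}
// text === 'Hello from stream!', inputTokens === 10, outputTokens === 15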

dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts

Lines changed: 75 additions & 0 deletions
@@ -218,4 +218,79 @@ describe('Anthropic integration', () => {
       .completed();
     });
   });
+
+  const EXPECTED_STREAM_SPANS_PII_FALSE = {
+    transaction: 'main',
+    spans: expect.arrayContaining([
+      // messages.create with stream: true
+      expect.objectContaining({
+        description: 'messages claude-3-haiku-20240307 stream-response',
+        op: 'gen_ai.messages',
+        data: expect.objectContaining({
+          'gen_ai.system': 'anthropic',
+          'gen_ai.operation.name': 'messages',
+          'gen_ai.request.model': 'claude-3-haiku-20240307',
+          'gen_ai.request.stream': true,
+          'gen_ai.response.streaming': true,
+          'gen_ai.response.model': 'claude-3-haiku-20240307',
+          'gen_ai.response.id': 'msg_stream_1',
+          'gen_ai.usage.input_tokens': 10,
+          'gen_ai.usage.output_tokens': 15,
+          'gen_ai.usage.total_tokens': 25,
+          'gen_ai.response.finish_reasons': '["end_turn"]',
+        }),
+      }),
+      // messages.stream
+      expect.objectContaining({
+        description: 'messages claude-3-haiku-20240307 stream-response',
+        op: 'gen_ai.messages',
+        data: expect.objectContaining({
+          'gen_ai.system': 'anthropic',
+          'gen_ai.operation.name': 'messages',
+          'gen_ai.request.model': 'claude-3-haiku-20240307',
+          'gen_ai.response.streaming': true,
+          'gen_ai.response.model': 'claude-3-haiku-20240307',
+          'gen_ai.response.id': 'msg_stream_1',
+          'gen_ai.usage.input_tokens': 10,
+          'gen_ai.usage.output_tokens': 15,
+          'gen_ai.usage.total_tokens': 25,
+        }),
+      }),
+    ]),
+  };
+
+  const EXPECTED_STREAM_SPANS_PII_TRUE = {
+    transaction: 'main',
+    spans: expect.arrayContaining([
+      expect.objectContaining({
+        description: 'messages claude-3-haiku-20240307 stream-response',
+        op: 'gen_ai.messages',
+        data: expect.objectContaining({
+          'gen_ai.response.streaming': true,
+          // streamed text concatenated
+          'gen_ai.response.text': 'Hello from stream!',
+        }),
+      }),
+      expect.objectContaining({
+        description: 'messages claude-3-haiku-20240307 stream-response',
+        op: 'gen_ai.messages',
+        data: expect.objectContaining({
+          'gen_ai.response.streaming': true,
+          'gen_ai.response.text': 'Hello from stream!',
+        }),
+      }),
+    ]),
+  };
+
+  createEsmAndCjsTests(__dirname, 'scenario-stream.mjs', 'instrument.mjs', (createRunner, test) => {
+    test('streams produce spans with token usage and metadata (PII false)', async () => {
+      await createRunner().ignore('event').expect({ transaction: EXPECTED_STREAM_SPANS_PII_FALSE }).start().completed();
+    });
+  });
+
+  createEsmAndCjsTests(__dirname, 'scenario-stream.mjs', 'instrument-with-pii.mjs', (createRunner, test) => {
+    test('streams record response text when PII true', async () => {
+      await createRunner().ignore('event').expect({ transaction: EXPECTED_STREAM_SPANS_PII_TRUE }).start().completed();
+    });
+  });
 });
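A note on the assertions: gen_ai.usage.total_tokens (25) is never emitted by the mock; the value is consistent with the instrumentation summing the streamed input (10) and output (15) counts, presumably in the setTokenUsageAttributes helper visible in the streaming.ts diff below. A minimal sketch of that assumed behavior:

// Assumed behavior for illustration; not the SDK's actual implementation.
function totalTokens(input?: number, output?: number): number | undefined {
  if (input === undefined && output === undefined) return undefined;
  return (input ?? 0) + (output ?? 0);
}
// totalTokens(10, 15) === 25, matching the asserted gen_ai.usage.total_tokens.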

packages/core/src/utils/anthropic-ai/index.ts

Lines changed: 1 addition & 1 deletion

@@ -191,7 +191,7 @@ function instrumentMethod<T extends unknown[], R>(

       const result = await originalMethod.apply(context, args);
       return instrumentStream(
-        result as unknown as AsyncIterable<AnthropicAiStreamingEvent>,
+        result as AsyncIterable<AnthropicAiStreamingEvent>,
         span,
         finalOptions.recordOutputs ?? false,
       ) as unknown as R;
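The single changed line drops a redundant hop through unknown: a double cast (x as unknown as T) is only required when the compiler considers the two types unrelated, and here the direct cast evidently type-checks. An illustrative sketch with stand-in types (not the SDK's actual generics):

// Stand-in types for illustration only.
declare const value: object;

// Before: the extra `as unknown` step is always legal, but noisier.
const before = value as unknown as AsyncIterable<string>;

// After: a direct cast compiles because object and AsyncIterable<string>
// are not provably unrelated types.
const after = value as AsyncIterable<string>;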

packages/core/src/utils/anthropic-ai/streaming.ts

Lines changed: 1 addition & 16 deletions

@@ -2,7 +2,6 @@ import { captureException } from '../../exports';
 import { SPAN_STATUS_ERROR } from '../../tracing';
 import type { Span } from '../../types-hoist/span';
 import {
-  ANTHROPIC_AI_RESPONSE_TIMESTAMP_ATTRIBUTE,
   GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE,
   GEN_AI_RESPONSE_ID_ATTRIBUTE,
   GEN_AI_RESPONSE_MODEL_ATTRIBUTE,
@@ -17,8 +16,6 @@ import type { AnthropicAiStreamingEvent } from './types';
  */

 interface StreamingState {
-  /** Types of events encountered in the stream. */
-  eventTypes: string[];
   /** Collected response text fragments (for output recording). */
   responseTexts: string[];
   /** Reasons for finishing the response, as reported by the API. */
@@ -27,8 +24,6 @@
   responseId: string;
   /** The model name. */
   responseModel: string;
-  /** The timestamp of the response. */
-  responseTimestamp: number;
   /** Number of prompt/input tokens used. */
   promptTokens: number | undefined;
   /** Number of completion/output tokens used. */
@@ -55,8 +50,6 @@ function isErrorEvent(
   span: Span,
 ): boolean {
   if ('type' in event && typeof event.type === 'string') {
-    state.eventTypes.push(event.type);
-
     // If the event is an error, set the span status and capture the error
     // These error events are not rejected by the API by default, but are sent as metadata of the response
     if (event.type === 'error') {
@@ -78,7 +71,7 @@
   }

   if (recordOutputs && event.type === 'content_block_delta') {
-    const text = event.delta?.text ?? '';
+    const text = event.delta?.text;
     if (text) state.responseTexts.push(text);
   }
 }
@@ -132,7 +125,6 @@ function processEvent(
   span: Span,
 ): void {
   if (!(event && typeof event === 'object')) {
-    state.eventTypes.push('unknown:non-object');
     return;
   }

@@ -153,12 +145,10 @@ export async function* instrumentStream(
   recordOutputs: boolean,
 ): AsyncGenerator<AnthropicAiStreamingEvent, void, unknown> {
   const state: StreamingState = {
-    eventTypes: [],
     responseTexts: [],
     finishReasons: [],
     responseId: '',
     responseModel: '',
-    responseTimestamp: 0,
     promptTokens: undefined,
     completionTokens: undefined,
     cacheCreationInputTokens: undefined,
@@ -182,11 +172,6 @@
       [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: state.responseModel,
     });
   }
-  if (state.responseTimestamp) {
-    span.setAttributes({
-      [ANTHROPIC_AI_RESPONSE_TIMESTAMP_ATTRIBUTE]: new Date(state.responseTimestamp * 1000).toISOString(),
-    });
-  }

   setTokenUsageAttributes(
     span,
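Pieced together from the context lines above, the trimmed StreamingState keeps only the fields the span attributes need. A reconstruction of the post-change interface (the finishReasons and token-counter types are inferred from their initializers, so the real file may differ slightly):

interface StreamingState {
  /** Collected response text fragments (for output recording). */
  responseTexts: string[];
  /** Reasons for finishing the response, as reported by the API. */
  finishReasons: string[];
  /** The response ID (comment inferred; the original sits outside the hunk). */
  responseId: string;
  /** The model name. */
  responseModel: string;
  /** Number of prompt/input tokens used. */
  promptTokens: number | undefined;
  /** Number of completion/output tokens used. */
  completionTokens: number | undefined;
  /** Cache-creation input tokens (type inferred from the initializer). */
  cacheCreationInputTokens: number | undefined;
}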
