Skip to content

Commit 305fd52

Browse files
committed
quick refactor
1 parent 4fba247 commit 305fd52

File tree

4 files changed

+19
-12
lines changed

4 files changed

+19
-12
lines changed

dev-packages/node-integration-tests/suites/tracing/openai/scenario.mjs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -222,7 +222,7 @@ class MockOpenAI {
222222
user: null,
223223
metadata: {},
224224
output: [],
225-
output_text: `Streaming response to: ${params.input}`,
225+
output_text: params.input,
226226
usage: {
227227
input_tokens: 6,
228228
output_tokens: 10,

dev-packages/node-integration-tests/suites/tracing/openai/test.ts

Lines changed: 8 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -84,7 +84,7 @@ describe('OpenAI integration', () => {
8484
'gen_ai.request.stream': true,
8585
'gen_ai.response.model': 'gpt-4',
8686
'gen_ai.response.id': 'chatcmpl-stream-123',
87-
'gen_ai.response.finish_reasons': 'stop',
87+
'gen_ai.response.finish_reasons': '["stop"]',
8888
'gen_ai.usage.input_tokens': 12,
8989
'gen_ai.usage.output_tokens': 18,
9090
'gen_ai.usage.total_tokens': 30,
@@ -110,7 +110,7 @@ describe('OpenAI integration', () => {
110110
'gen_ai.request.stream': true,
111111
'gen_ai.response.model': 'gpt-4',
112112
'gen_ai.response.id': 'resp_stream_456',
113-
'gen_ai.response.finish_reasons': 'in_progress',
113+
'gen_ai.response.finish_reasons': '["in_progress"]',
114114
'gen_ai.usage.input_tokens': 0,
115115
'gen_ai.usage.output_tokens': 0,
116116
'gen_ai.usage.total_tokens': 0,
@@ -210,9 +210,10 @@ describe('OpenAI integration', () => {
210210
'gen_ai.request.model': 'gpt-4',
211211
'gen_ai.request.temperature': 0.8,
212212
'gen_ai.request.stream': true,
213-
'gen_ai.request.messages': expect.stringContaining('Tell me about streaming'),
213+
'gen_ai.request.messages':
214+
'[{"role":"system","content":"You are a helpful assistant."},{"role":"user","content":"Tell me about streaming"}]',
214215
'gen_ai.response.text': 'Hello from OpenAI streaming!',
215-
'gen_ai.response.finish_reasons': 'stop',
216+
'gen_ai.response.finish_reasons': '["stop"]',
216217
'gen_ai.response.id': 'chatcmpl-stream-123',
217218
'gen_ai.response.model': 'gpt-4',
218219
'gen_ai.usage.input_tokens': 12,
@@ -240,8 +241,8 @@ describe('OpenAI integration', () => {
240241
'gen_ai.request.model': 'gpt-4',
241242
'gen_ai.request.stream': true,
242243
'gen_ai.request.messages': '"Test streaming responses API"',
243-
'gen_ai.response.text': expect.stringContaining('Streaming response to: Test streaming responses API'),
244-
'gen_ai.response.finish_reasons': 'completed',
244+
'gen_ai.response.text': 'Streaming response to: Test streaming responses APITest streaming responses API',
245+
'gen_ai.response.finish_reasons': '["in_progress","completed"]',
245246
'gen_ai.response.id': 'resp_stream_456',
246247
'gen_ai.response.model': 'gpt-4',
247248
'gen_ai.usage.input_tokens': 6,
@@ -265,7 +266,7 @@ describe('OpenAI integration', () => {
265266
'gen_ai.operation.name': 'chat',
266267
'gen_ai.request.model': 'error-model',
267268
'gen_ai.request.stream': true,
268-
'gen_ai.request.messages': expect.stringContaining('This will fail'),
269+
'gen_ai.request.messages': '[{"role":"user","content":"This will fail"}]',
269270
'gen_ai.system': 'openai',
270271
'openai.response.stream': true,
271272
'sentry.op': 'gen_ai.chat',

packages/core/src/utils/openai/streaming.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -124,7 +124,7 @@ export async function* instrumentStream<T>(
124124

125125
if (state.finishReasons.length) {
126126
span.setAttributes({
127-
[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: state.finishReasons[state.finishReasons.length - 1],
127+
[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: JSON.stringify(state.finishReasons),
128128
});
129129
}
130130

packages/core/src/utils/openai/utils.ts

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,13 @@ import {
1313
OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE,
1414
} from '../gen-ai-attributes';
1515
import { INSTRUMENTED_METHODS } from './constants';
16-
import type { InstrumentedMethod, OpenAiChatCompletionObject, OpenAIResponseObject } from './types';
16+
import type {
17+
ChatCompletionChunk,
18+
InstrumentedMethod,
19+
OpenAiChatCompletionObject,
20+
OpenAIResponseObject,
21+
ResponseStreamingEvent,
22+
} from './types';
1723

1824
/**
1925
* Maps OpenAI method paths to Sentry operation names
@@ -78,7 +84,7 @@ export function isResponsesApiResponse(response: unknown): response is OpenAIRes
7884
/**
7985
* Check if streaming event is from the Responses API
8086
*/
81-
export function isResponsesApiStreamEvent(event: unknown): boolean {
87+
export function isResponsesApiStreamEvent(event: unknown): event is ResponseStreamingEvent {
8288
return (
8389
event !== null &&
8490
typeof event === 'object' &&
@@ -91,7 +97,7 @@ export function isResponsesApiStreamEvent(event: unknown): boolean {
9197
/**
9298
* Check if streaming event is a chat completion chunk
9399
*/
94-
export function isChatCompletionChunk(event: unknown): boolean {
100+
export function isChatCompletionChunk(event: unknown): event is ChatCompletionChunk {
95101
return (
96102
event !== null &&
97103
typeof event === 'object' &&

0 commit comments

Comments (0)