Commit 888e107

Simplify openai mock for cloudflare
1 parent eda7807 commit 888e107

File tree

1 file changed (+1, -196 lines)
  • dev-packages/cloudflare-integration-tests/suites/tracing/openai

Lines changed: 1 addition & 196 deletions
@@ -1,11 +1,7 @@
 import type { OpenAiClient } from '@sentry/core';
 
 export class MockOpenAi implements OpenAiClient {
-  public chat: Record<string, unknown>;
-  public responses: {
-    create: (...args: unknown[]) => Promise<unknown>;
-  };
-
+  public chat?: Record<string, unknown>;
   public apiKey: string;
 
   public constructor(config: { apiKey: string }) {
@@ -25,11 +21,6 @@ export class MockOpenAi implements OpenAiClient {
             throw error;
           }
 
-          // If stream is requested, return an async generator
-          if (params.stream) {
-            return this.createChatCompletionStream(params);
-          }
-
           return {
             id: 'chatcmpl-mock123',
             object: 'chat.completion',
@@ -55,191 +46,5 @@ export class MockOpenAi implements OpenAiClient {
         },
       },
     };
-
-    this.responses = {
-      create: async (...args: unknown[]) => {
-        const params = args[0] as { model: string; input: string; instructions: string; stream?: boolean };
-        await new Promise(resolve => setTimeout(resolve, 10));
-
-        // If stream is requested, return an async generator
-        if (params.stream) {
-          return this.createResponsesApiStream(params);
-        }
-
-        return {
-          id: 'resp_mock456',
-          object: 'response',
-          created_at: 1677652290,
-          model: params.model,
-          input_text: params.input,
-          output_text: `Response to: ${params.input}`,
-          status: 'completed',
-          usage: {
-            input_tokens: 5,
-            output_tokens: 8,
-            total_tokens: 13,
-          },
-        };
-      },
-    };
-  }
-
-  // Create a mock streaming response for chat completions
-  public async *createChatCompletionStream(params: { model: string }): AsyncGenerator<unknown> {
-    // First chunk with basic info
-    yield {
-      id: 'chatcmpl-stream-123',
-      object: 'chat.completion.chunk',
-      created: 1677652300,
-      model: params.model,
-      system_fingerprint: 'fp_stream_123',
-      choices: [
-        {
-          index: 0,
-          delta: {
-            role: 'assistant',
-            content: 'Hello',
-          },
-          finish_reason: null,
-        },
-      ],
-    };
-
-    // Second chunk with more content
-    yield {
-      id: 'chatcmpl-stream-123',
-      object: 'chat.completion.chunk',
-      created: 1677652300,
-      model: params.model,
-      system_fingerprint: 'fp_stream_123',
-      choices: [
-        {
-          index: 0,
-          delta: {
-            content: ' from OpenAI streaming!',
-          },
-          finish_reason: 'stop',
-        },
-      ],
-      usage: {
-        prompt_tokens: 12,
-        completion_tokens: 18,
-        total_tokens: 30,
-        completion_tokens_details: {
-          accepted_prediction_tokens: 0,
-          audio_tokens: 0,
-          reasoning_tokens: 0,
-          rejected_prediction_tokens: 0,
-        },
-        prompt_tokens_details: {
-          audio_tokens: 0,
-          cached_tokens: 0,
-        },
-      },
-    };
-  }
-
-  // Create a mock streaming response for responses API
-  public async *createResponsesApiStream(params: {
-    model: string;
-    input: string;
-    instructions: string;
-  }): AsyncGenerator<unknown> {
-    // Response created event
-    yield {
-      type: 'response.created',
-      response: {
-        id: 'resp_stream_456',
-        object: 'response',
-        created_at: 1677652310,
-        model: params.model,
-        status: 'in_progress',
-        error: null,
-        incomplete_details: null,
-        instructions: params.instructions,
-        max_output_tokens: 1000,
-        parallel_tool_calls: false,
-        previous_response_id: null,
-        reasoning: {
-          effort: null,
-          summary: null,
-        },
-        store: false,
-        temperature: 0.7,
-        text: {
-          format: {
-            type: 'text',
-          },
-        },
-        tool_choice: 'auto',
-        top_p: 1.0,
-        truncation: 'disabled',
-        user: null,
-        metadata: {},
-        output: [],
-        output_text: '',
-        usage: {
-          input_tokens: 0,
-          output_tokens: 0,
-          total_tokens: 0,
-        },
-      },
-      sequence_number: 1,
-    };
-
-    // Response in progress with output text delta
-    yield {
-      type: 'response.output_text.delta',
-      delta: 'Streaming response to: ',
-      sequence_number: 2,
-    };
-
-    yield {
-      type: 'response.output_text.delta',
-      delta: params.input,
-      sequence_number: 3,
-    };
-
-    // Response completed event
-    yield {
-      type: 'response.completed',
-      response: {
-        id: 'resp_stream_456',
-        object: 'response',
-        created_at: 1677652310,
-        model: params.model,
-        status: 'completed',
-        error: null,
-        incomplete_details: null,
-        instructions: params.instructions,
-        max_output_tokens: 1000,
-        parallel_tool_calls: false,
-        previous_response_id: null,
-        reasoning: {
-          effort: null,
-          summary: null,
-        },
-        store: false,
-        temperature: 0.7,
-        text: {
-          format: {
-            type: 'text',
-          },
-        },
-        tool_choice: 'auto',
-        top_p: 1.0,
-        truncation: 'disabled',
-        user: null,
-        metadata: {},
-        output: [],
-        output_text: params.input,
-        usage: {
-          input_tokens: 6,
-          output_tokens: 10,
-          total_tokens: 16,
-        },
-      },
-      sequence_number: 4,
-    };
   }
 }
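
After this commit the mock keeps only the non-streaming chat.completions.create path; the Responses API mock and both streaming generators are gone. For orientation, below is a minimal, hypothetical sketch of how a cloudflare integration test might drive the simplified mock through Sentry's OpenAI instrumentation. instrumentOpenAiClient is the wrapper exported from @sentry/core; the './mocks' import path and the test body are assumptions, since the consuming test file is not part of this diff.

import { instrumentOpenAiClient } from '@sentry/core';

// Hypothetical import path; the mock module's filename is truncated in this diff view.
import { MockOpenAi } from './mocks';

async function run(): Promise<void> {
  const mockClient = new MockOpenAi({ apiKey: 'mock-api-key' });
  // Wrap the mock exactly as a real OpenAI client would be wrapped.
  const client = instrumentOpenAiClient(mockClient);

  // `chat` is only typed as Record<string, unknown>, so narrow it at the call site.
  const chat = client.chat as {
    completions: { create: (params: Record<string, unknown>) => Promise<unknown> };
  };

  // Resolves with the canned 'chatcmpl-mock123' completion defined in the mock.
  const response = await chat.completions.create({
    model: 'gpt-3.5-turbo',
    messages: [{ role: 'user', content: 'Hello!' }],
  });
  console.log(response);
}

run().catch(console.error);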

0 commit comments