Commit fea2e07
refactor: extract duplicate fetch logic into shared helper method
- Created makeResponsesApiRequest() to eliminate code duplication
- Both handleCodexMiniMessage and completePrompt now use the same helper
- Reduces maintenance overhead and ensures consistency
- Includes all error handling and documentation in one place
1 parent 9187168 commit fea2e07
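
The key design choice in this refactor is that the helper returns the raw `Response` instead of a parsed result, so the streaming caller can consume `response.body` while the non-streaming caller awaits `response.json()`. Below is a minimal standalone sketch of that pattern, assuming global `fetch` (Node 18+); the class wrapper from the diff is omitted, and the explicit `baseURL`/`apiKey` parameters are simplifications of the `this.options` fields used in the real code:

```ts
// Minimal sketch of the shared-helper pattern (simplified from the diff below).
interface ResponsesRequest {
	model: string
	instructions: string
	input: string
	stream: boolean
}

async function makeResponsesApiRequest(
	baseURL: string, // in the real code this comes from this.options.openAiNativeBaseUrl
	apiKey: string, // in the real code this comes from this.options.openAiNativeApiKey
	req: ResponsesRequest,
): Promise<Response> {
	const response = await fetch(`${baseURL}/responses`, {
		method: "POST",
		headers: {
			"Content-Type": "application/json",
			Authorization: `Bearer ${apiKey}`,
		},
		body: JSON.stringify(req),
	})
	if (!response.ok) {
		const errorText = await response.text()
		throw new Error(`OpenAI Responses API error: ${response.status} ${response.statusText} - ${errorText}`)
	}
	// Return the raw Response: the streaming caller reads response.body,
	// the non-streaming caller awaits response.json().
	return response
}
```

In the diff, handleCodexMiniMessage passes `stream: true` and hands `response.body` to an SSE handler, while completePrompt passes `stream: false` and reads `output_text` from the parsed JSON body.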

File tree: 1 file changed

src/api/providers/openai-native.ts

Lines changed: 31 additions & 54 deletions
```diff
@@ -125,15 +125,16 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
 		yield* this.handleStreamResponse(stream, model)
 	}

-	private async *handleCodexMiniMessage(
-		model: OpenAiNativeModel,
-		systemPrompt: string,
-		messages: Anthropic.Messages.MessageParam[],
-	): ApiStream {
-		// Convert messages to a single input string
-		const input = this.convertMessagesToInput(messages)
-
-		// Make direct API call to v1/responses endpoint
+	/**
+	 * Makes a request to the OpenAI Responses API endpoint
+	 * Used by codex-mini-latest model which requires the v1/responses endpoint
+	 */
+	private async makeResponsesApiRequest(
+		modelId: string,
+		instructions: string,
+		input: string,
+		stream: boolean = true,
+	): Promise<Response> {
 		// Note: Using fetch() instead of OpenAI client because the OpenAI SDK v5.0.0
 		// does not support the v1/responses endpoint used by codex-mini-latest model.
 		// This is a special endpoint that requires a different request/response format.
@@ -148,10 +149,10 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
 					Authorization: `Bearer ${apiKey}`,
 				},
 				body: JSON.stringify({
-					model: model.id,
-					instructions: systemPrompt,
+					model: modelId,
+					instructions: instructions,
 					input: input,
-					stream: true,
+					stream: stream,
 				}),
 			})

@@ -160,7 +161,7 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
 				throw new Error(`OpenAI Responses API error: ${response.status} ${response.statusText} - ${errorText}`)
 			}

-			yield* this.handleResponsesStreamResponse(response.body, model, systemPrompt, input)
+			return response
 		} catch (error) {
 			// Handle network failures and other errors
 			if (error instanceof TypeError && error.message.includes("fetch")) {
@@ -173,6 +174,19 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
 		}
 	}

+	private async *handleCodexMiniMessage(
+		model: OpenAiNativeModel,
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+	): ApiStream {
+		// Convert messages to a single input string
+		const input = this.convertMessagesToInput(messages)
+
+		// Make API call using shared helper
+		const response = await this.makeResponsesApiRequest(model.id, systemPrompt, input, true)
+		yield* this.handleResponsesStreamResponse(response.body, model, systemPrompt, input)
+	}
+
 	private convertMessagesToInput(messages: Anthropic.Messages.MessageParam[]): string {
 		return messages
 			.map((msg) => {
@@ -334,47 +348,10 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
 		const { id, temperature, reasoning } = this.getModel()

 		if (id === "codex-mini-latest") {
-			// Make direct API call to v1/responses endpoint
-			// Note: Using fetch() instead of OpenAI client because the OpenAI SDK v5.0.0
-			// does not support the v1/responses endpoint used by codex-mini-latest model.
-			// This is a special endpoint that requires a different request/response format.
-			const apiKey = this.options.openAiNativeApiKey ?? "not-provided"
-			const baseURL = this.options.openAiNativeBaseUrl ?? "https://api.openai.com/v1"
-
-			try {
-				const response = await fetch(`${baseURL}/responses`, {
-					method: "POST",
-					headers: {
-						"Content-Type": "application/json",
-						Authorization: `Bearer ${apiKey}`,
-					},
-					body: JSON.stringify({
-						model: id,
-						instructions: "Complete the following prompt:",
-						input: prompt,
-						stream: false,
-					}),
-				})
-
-				if (!response.ok) {
-					const errorText = await response.text()
-					throw new Error(
-						`OpenAI Responses API error: ${response.status} ${response.statusText} - ${errorText}`,
-					)
-				}
-
-				const data = await response.json()
-				return data.output_text || ""
-			} catch (error) {
-				// Handle network failures and other errors
-				if (error instanceof TypeError && error.message.includes("fetch")) {
-					throw new Error(`Network error while calling OpenAI Responses API: ${error.message}`)
-				}
-				if (error instanceof Error) {
-					throw new Error(`OpenAI Responses API error: ${error.message}`)
-				}
-				throw new Error("Unknown error occurred while calling OpenAI Responses API")
-			}
+			// Make API call using shared helper
+			const response = await this.makeResponsesApiRequest(id, "Complete the following prompt:", prompt, false)
+			const data = await response.json()
+			return data.output_text || ""
 		}

 		const params: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
```
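
For the streaming path, returning the `Response` means the caller can incrementally decode `response.body`. The commit delegates that work to handleResponsesStreamResponse, whose implementation is outside this diff; the sketch below is only a hypothetical illustration of consuming a server-sent-events body, not the project's actual parser:

```ts
// Hypothetical SSE reader (not the repository's handleResponsesStreamResponse):
// decodes a streamed Response body and yields the payload of each `data:` line.
async function* readSseData(body: ReadableStream<Uint8Array>): AsyncGenerator<string> {
	const reader = body.getReader()
	const decoder = new TextDecoder()
	let buffer = ""

	while (true) {
		const { done, value } = await reader.read()
		if (done) break
		buffer += decoder.decode(value, { stream: true })

		// SSE events are separated by a blank line.
		let sep: number
		while ((sep = buffer.indexOf("\n\n")) !== -1) {
			const event = buffer.slice(0, sep)
			buffer = buffer.slice(sep + 2)
			for (const line of event.split("\n")) {
				if (line.startsWith("data: ")) {
					yield line.slice("data: ".length)
				}
			}
		}
	}
}
```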

0 commit comments