
Commit c8ad976

fix: remove unreachable code and ensure Codex Mini gets reasoning effort support
- Removed unreachable GPT-5-specific code in the completePrompt method
- Updated getModel to apply reasoning effort to all Responses API models (GPT-5 and Codex Mini)
- Both models now properly support reasoning effort and verbosity through the Responses API
1 parent ee9d7c6 commit c8ad976
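
For context on what "reasoning effort and verbosity through the Responses API" looks like in practice, here is a minimal illustrative sketch of a request body; the model ID, the field values, and the placement of verbosity under `text` are assumptions for illustration, not code from this repository.

```typescript
// Illustrative sketch only (not this repository's code): how reasoning effort
// and verbosity are typically expressed in a Responses API request body.
type ReasoningEffortWithMinimal = "minimal" | "low" | "medium" | "high"

interface ResponsesRequestSketch {
	model: string
	input: string
	reasoning?: { effort: ReasoningEffortWithMinimal }
	text?: { verbosity: "low" | "medium" | "high" }
}

// Hypothetical request for a Codex Mini model; the ID is assumed for illustration.
const request: ResponsesRequestSketch = {
	model: "codex-mini-latest",
	input: "Explain this diff",
	reasoning: { effort: "medium" },
	text: { verbosity: "low" },
}
```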

File tree: 1 file changed (+5 −16 lines)


src/api/providers/openai-native.ts

Lines changed: 5 additions & 16 deletions
```diff
@@ -1208,8 +1208,8 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
 			defaultTemperature: this.isGpt5Model(id) ? GPT5_DEFAULT_TEMPERATURE : OPENAI_NATIVE_DEFAULT_TEMPERATURE,
 		})
 
-		// For GPT-5 models, ensure we support minimal reasoning effort
-		if (this.isGpt5Model(id)) {
+		// For models using the Responses API (GPT-5 and Codex Mini), ensure we support reasoning effort
+		if (this.isResponsesApiModel(id)) {
 			const effort =
 				(this.options.reasoningEffort as ReasoningEffortWithMinimal | undefined) ??
 				(info.reasoningEffort as ReasoningEffortWithMinimal | undefined)
@@ -1262,20 +1262,9 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
 			params.temperature = temperature
 		}
 
-		// For GPT-5 models, add reasoning_effort and verbosity as top-level parameters
-		// (Note: This code path won't be reached for GPT-5 or Codex Mini since they use responses API)
-		if (this.isGpt5Model(id)) {
-			if (reasoning && "reasoning_effort" in reasoning) {
-				params.reasoning_effort = reasoning.reasoning_effort
-			}
-			if (verbosity) {
-				params.verbosity = verbosity
-			}
-		} else {
-			// For non-GPT-5 models, add reasoning as is
-			if (reasoning) {
-				Object.assign(params, reasoning)
-			}
+		// Add reasoning parameters for models that support them
+		if (reasoning) {
+			Object.assign(params, reasoning)
 		}
 
 		const response = await this.client.chat.completions.create(params)
```
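
The diff relies on an `isResponsesApiModel(id)` check that is not shown in this hunk. Below is a minimal sketch of how such a helper could look; the ID-prefix matching and the effort-resolution helper are assumptions for illustration, not the repository's actual implementation.

```typescript
// Hypothetical sketch, not the repository's actual implementation: route both
// GPT-5 and Codex Mini model IDs to the Responses API path so they share the
// reasoning-effort handling shown in getModel above.
type ReasoningEffortWithMinimal = "minimal" | "low" | "medium" | "high"

function isResponsesApiModel(id: string): boolean {
	// Assumption: Responses API models are recognizable by their ID prefixes.
	return id.startsWith("gpt-5") || id.startsWith("codex-mini")
}

function resolveReasoningEffort(
	optionEffort: ReasoningEffortWithMinimal | undefined,
	modelDefault: ReasoningEffortWithMinimal | undefined,
): ReasoningEffortWithMinimal | undefined {
	// Mirrors the `??` fallback in the diff: the user's setting wins,
	// otherwise fall back to the model's default effort.
	return optionEffort ?? modelDefault
}

// Example: a user-configured "minimal" effort overrides a model default of "medium".
console.log(isResponsesApiModel("gpt-5-mini")) // true (under the assumed prefix rule)
console.log(resolveReasoningEffort("minimal", "medium")) // "minimal"
```

With the Chat Completions branch collapsed to a single `Object.assign(params, reasoning)`, the GPT-5-specific dead code in `completePrompt` is gone and both Responses API models get consistent reasoning-effort handling.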
