diff --git a/src/api/providers/__tests__/openai-native.spec.ts b/src/api/providers/__tests__/openai-native.spec.ts index fdd71ba3f6..69e6b59f73 100644 --- a/src/api/providers/__tests__/openai-native.spec.ts +++ b/src/api/providers/__tests__/openai-native.spec.ts @@ -461,6 +461,101 @@ describe("OpenAiNativeHandler", () => { }) describe("GPT-5 models", () => { + it("should use temperature 1.0 as default for GPT-5 models", async () => { + // Test GPT-5 model without custom temperature + handler = new OpenAiNativeHandler({ + ...mockOptions, + apiModelId: "gpt-5-2025-08-07", + }) + + const stream = handler.createMessage(systemPrompt, messages) + const chunks: any[] = [] + for await (const chunk of stream) { + chunks.push(chunk) + } + + // Verify temperature 1.0 is used as default for GPT-5 + expect(mockCreate).toHaveBeenCalledWith( + expect.objectContaining({ + model: "gpt-5-2025-08-07", + temperature: 1.0, // Default temperature for GPT-5 + messages: [{ role: "developer", content: expect.stringContaining(systemPrompt) }], + stream: true, + stream_options: { include_usage: true }, + reasoning_effort: "minimal", + verbosity: "medium", + }), + ) + }) + + it("should respect custom temperature for GPT-5 models", async () => { + // Test GPT-5 model with custom temperature + handler = new OpenAiNativeHandler({ + ...mockOptions, + apiModelId: "gpt-5-2025-08-07", + modelTemperature: 0.7, + }) + + const stream = handler.createMessage(systemPrompt, messages) + const chunks: any[] = [] + for await (const chunk of stream) { + chunks.push(chunk) + } + + // Verify custom temperature is used when specified + expect(mockCreate).toHaveBeenCalledWith( + expect.objectContaining({ + model: "gpt-5-2025-08-07", + temperature: 0.7, // Custom temperature + messages: [{ role: "developer", content: expect.stringContaining(systemPrompt) }], + stream: true, + stream_options: { include_usage: true }, + reasoning_effort: "minimal", + verbosity: "medium", + }), + ) + }) + + it("should use temperature 
1.0 for GPT-5-mini and GPT-5-nano models", async () => { + // Test GPT-5-mini + handler = new OpenAiNativeHandler({ + ...mockOptions, + apiModelId: "gpt-5-mini-2025-08-07", + }) + + let stream = handler.createMessage(systemPrompt, messages) + for await (const chunk of stream) { + // consume stream + } + + expect(mockCreate).toHaveBeenCalledWith( + expect.objectContaining({ + model: "gpt-5-mini-2025-08-07", + temperature: 1.0, // Default temperature for GPT-5 variants + }), + ) + + mockCreate.mockClear() + + // Test GPT-5-nano + handler = new OpenAiNativeHandler({ + ...mockOptions, + apiModelId: "gpt-5-nano-2025-08-07", + }) + + stream = handler.createMessage(systemPrompt, messages) + for await (const chunk of stream) { + // consume stream + } + + expect(mockCreate).toHaveBeenCalledWith( + expect.objectContaining({ + model: "gpt-5-nano-2025-08-07", + temperature: 1.0, // Default temperature for GPT-5 variants + }), + ) + }) + it("should handle GPT-5 model with developer role", async () => { handler = new OpenAiNativeHandler({ ...mockOptions, diff --git a/src/api/providers/openai-native.ts b/src/api/providers/openai-native.ts index 5e498bee45..cbb1502bb1 100644 --- a/src/api/providers/openai-native.ts +++ b/src/api/providers/openai-native.ts @@ -145,12 +145,12 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio systemPrompt: string, messages: Anthropic.Messages.MessageParam[], ): ApiStream { - const { reasoning, verbosity } = this.getModel() + const { reasoning, verbosity, temperature } = this.getModel() // Prepare the request parameters const params: any = { model: model.id, - temperature: this.options.modelTemperature ?? 
OPENAI_NATIVE_DEFAULT_TEMPERATURE, + temperature: temperature, messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)], stream: true, stream_options: { include_usage: true }, @@ -191,10 +191,14 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio // Get verbosity from model settings, default to "medium" if not specified const verbosity = model.verbosity || "medium" + // Get temperature from model settings + const temperature = model.temperature + // Prepare the request parameters for Responses API - const params: GPT5ResponsesAPIParams = { + const params: GPT5ResponsesAPIParams & { temperature?: number } = { model: model.id, input: formattedInput, + temperature: temperature, ...(reasoningEffort && { reasoning: { effort: reasoningEffort, @@ -246,7 +250,7 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio } private async makeGpt5ResponsesAPIRequest( - params: GPT5ResponsesAPIParams, + params: GPT5ResponsesAPIParams & { temperature?: number }, model: OpenAiNativeModel, ): Promise> { // The OpenAI SDK doesn't have direct support for the Responses API yet, @@ -266,6 +270,11 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio stream_options: { include_usage: true }, } + // Add temperature if specified + if ("temperature" in params && params.temperature !== undefined) { + requestParams.temperature = params.temperature + } + // Add reasoning effort if specified (supporting "minimal" for GPT-5) if (params.reasoning?.effort) { if (params.reasoning.effort === "minimal") { @@ -351,6 +360,11 @@ } } + /** + * Checks whether a model id belongs to the GPT-5 family (id starts with "gpt-5"). + * GPT-5 models get a default temperature of 1.0 in getModel(); other models + * fall back to OPENAI_NATIVE_DEFAULT_TEMPERATURE.
+ */ private isGpt5Model(modelId: string): boolean { return modelId.startsWith("gpt-5") } @@ -401,12 +415,16 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio const info: ModelInfo = openAiNativeModels[id] + // GPT-5 family models default to temperature 1.0 rather than the provider-wide + // default; an explicit options.modelTemperature still overrides this via getModelParams + const defaultTemp = this.isGpt5Model(id) ? 1.0 : OPENAI_NATIVE_DEFAULT_TEMPERATURE + const params = getModelParams({ format: "openai", modelId: id, model: info, settings: this.options, - defaultTemperature: OPENAI_NATIVE_DEFAULT_TEMPERATURE, + defaultTemperature: defaultTemp, }) // For GPT-5 models, ensure we support minimal reasoning effort