
Commit c36dfc5

Revert "fix: omit temperature parameter when not explicitly set for OpenAI Compatible providers (#7188)"
This reverts commit 090737c.
1 parent 2e59347 commit c36dfc5

9 files changed (+14 additions, -161 deletions)

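In behavioral terms, this revert makes the OpenAI-compatible providers always send a temperature field again: a user-configured modelTemperature wins when set, and a per-provider default is used otherwise, instead of omitting the field entirely as #7188 did. A minimal TypeScript sketch of the two behaviors (helper names are illustrative; the option and default names mirror the diffs below):

// Illustrative sketch only; not code from this commit.
type ProviderOptions = { modelTemperature?: number }

// Restored behavior: temperature is always present, falling back to a provider default.
function temperatureAfterRevert(options: ProviderOptions, defaultTemperature: number): number {
    return options.modelTemperature ?? defaultTemperature
}

// Reverted behavior (#7188): the field was only included when explicitly set.
function temperatureBeforeRevert(options: ProviderOptions): { temperature?: number } {
    return options.modelTemperature !== undefined ? { temperature: options.modelTemperature } : {}
}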

src/api/providers/__tests__/chutes.spec.ts

Lines changed: 1 addition & 0 deletions
@@ -416,6 +416,7 @@ describe("ChutesHandler", () => {
 expect.objectContaining({
 model: modelId,
 max_tokens: modelInfo.maxTokens,
+temperature: 0.5,
 messages: expect.arrayContaining([{ role: "system", content: systemPrompt }]),
 stream: true,
 stream_options: { include_usage: true },

src/api/providers/__tests__/fireworks.spec.ts

Lines changed: 1 addition & 0 deletions
@@ -373,6 +373,7 @@ describe("FireworksHandler", () => {
 expect.objectContaining({
 model: modelId,
 max_tokens: modelInfo.maxTokens,
+temperature: 0.5,
 messages: expect.arrayContaining([{ role: "system", content: systemPrompt }]),
 stream: true,
 stream_options: { include_usage: true },

src/api/providers/__tests__/groq.spec.ts

Lines changed: 1 addition & 78 deletions
@@ -160,11 +160,7 @@ describe("GroqHandler", () => {
 it("createMessage should pass correct parameters to Groq client", async () => {
 const modelId: GroqModelId = "llama-3.1-8b-instant"
 const modelInfo = groqModels[modelId]
-const handlerWithModel = new GroqHandler({
-apiModelId: modelId,
-groqApiKey: "test-groq-api-key",
-modelTemperature: 0.5, // Explicitly set temperature for this test
-})
+const handlerWithModel = new GroqHandler({ apiModelId: modelId, groqApiKey: "test-groq-api-key" })

 mockCreate.mockImplementationOnce(() => {
 return {
@@ -194,77 +190,4 @@ describe("GroqHandler", () => {
 undefined,
 )
 })
-
-it("should omit temperature when modelTemperature is undefined", async () => {
-const modelId: GroqModelId = "llama-3.1-8b-instant"
-const handlerWithoutTemp = new GroqHandler({
-apiModelId: modelId,
-groqApiKey: "test-groq-api-key",
-// modelTemperature is not set
-})
-
-mockCreate.mockImplementationOnce(() => {
-return {
-[Symbol.asyncIterator]: () => ({
-async next() {
-return { done: true }
-},
-}),
-}
-})
-
-const systemPrompt = "Test system prompt"
-const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Test message" }]
-
-const messageGenerator = handlerWithoutTemp.createMessage(systemPrompt, messages)
-await messageGenerator.next()
-
-expect(mockCreate).toHaveBeenCalledWith(
-expect.objectContaining({
-model: modelId,
-messages: expect.arrayContaining([{ role: "system", content: systemPrompt }]),
-stream: true,
-}),
-undefined,
-)
-
-// Verify temperature is NOT included
-const callArgs = mockCreate.mock.calls[0][0]
-expect(callArgs).not.toHaveProperty("temperature")
-})
-
-it("should include temperature when modelTemperature is explicitly set", async () => {
-const modelId: GroqModelId = "llama-3.1-8b-instant"
-const handlerWithTemp = new GroqHandler({
-apiModelId: modelId,
-groqApiKey: "test-groq-api-key",
-modelTemperature: 0.7,
-})
-
-mockCreate.mockImplementationOnce(() => {
-return {
-[Symbol.asyncIterator]: () => ({
-async next() {
-return { done: true }
-},
-}),
-}
-})
-
-const systemPrompt = "Test system prompt"
-const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Test message" }]
-
-const messageGenerator = handlerWithTemp.createMessage(systemPrompt, messages)
-await messageGenerator.next()
-
-expect(mockCreate).toHaveBeenCalledWith(
-expect.objectContaining({
-model: modelId,
-temperature: 0.7,
-messages: expect.arrayContaining([{ role: "system", content: systemPrompt }]),
-stream: true,
-}),
-undefined,
-)
-})
 })

src/api/providers/__tests__/openai.spec.ts

Lines changed: 1 addition & 66 deletions
@@ -315,71 +315,6 @@ describe("OpenAiHandler", () => {
 const callArgs = mockCreate.mock.calls[0][0]
 expect(callArgs.max_completion_tokens).toBe(4096)
 })
-
-it("should omit temperature when modelTemperature is undefined", async () => {
-const optionsWithoutTemperature: ApiHandlerOptions = {
-...mockOptions,
-// modelTemperature is not set, should not include temperature
-}
-const handlerWithoutTemperature = new OpenAiHandler(optionsWithoutTemperature)
-const stream = handlerWithoutTemperature.createMessage(systemPrompt, messages)
-// Consume the stream to trigger the API call
-for await (const _chunk of stream) {
-}
-// Assert the mockCreate was called without temperature
-expect(mockCreate).toHaveBeenCalled()
-const callArgs = mockCreate.mock.calls[0][0]
-expect(callArgs).not.toHaveProperty("temperature")
-})
-
-it("should include temperature when modelTemperature is explicitly set to 0", async () => {
-const optionsWithZeroTemperature: ApiHandlerOptions = {
-...mockOptions,
-modelTemperature: 0,
-}
-const handlerWithZeroTemperature = new OpenAiHandler(optionsWithZeroTemperature)
-const stream = handlerWithZeroTemperature.createMessage(systemPrompt, messages)
-// Consume the stream to trigger the API call
-for await (const _chunk of stream) {
-}
-// Assert the mockCreate was called with temperature: 0
-expect(mockCreate).toHaveBeenCalled()
-const callArgs = mockCreate.mock.calls[0][0]
-expect(callArgs.temperature).toBe(0)
-})
-
-it("should include temperature when modelTemperature is set to a non-zero value", async () => {
-const optionsWithCustomTemperature: ApiHandlerOptions = {
-...mockOptions,
-modelTemperature: 0.7,
-}
-const handlerWithCustomTemperature = new OpenAiHandler(optionsWithCustomTemperature)
-const stream = handlerWithCustomTemperature.createMessage(systemPrompt, messages)
-// Consume the stream to trigger the API call
-for await (const _chunk of stream) {
-}
-// Assert the mockCreate was called with temperature: 0.7
-expect(mockCreate).toHaveBeenCalled()
-const callArgs = mockCreate.mock.calls[0][0]
-expect(callArgs.temperature).toBe(0.7)
-})
-
-it("should include DEEP_SEEK_DEFAULT_TEMPERATURE for deepseek-reasoner models when temperature is not set", async () => {
-const deepseekOptions: ApiHandlerOptions = {
-...mockOptions,
-openAiModelId: "deepseek-reasoner",
-// modelTemperature is not set
-}
-const deepseekHandler = new OpenAiHandler(deepseekOptions)
-const stream = deepseekHandler.createMessage(systemPrompt, messages)
-// Consume the stream to trigger the API call
-for await (const _chunk of stream) {
-}
-// Assert the mockCreate was called with DEEP_SEEK_DEFAULT_TEMPERATURE (0.6)
-expect(mockCreate).toHaveBeenCalled()
-const callArgs = mockCreate.mock.calls[0][0]
-expect(callArgs.temperature).toBe(0.6)
-})
 })

 describe("error handling", () => {
@@ -515,7 +450,7 @@ describe("OpenAiHandler", () => {
 ],
 stream: true,
 stream_options: { include_usage: true },
-// temperature should be omitted when not set
+temperature: 0,
 },
 { path: "/models/chat/completions" },
 )

src/api/providers/__tests__/roo.spec.ts

Lines changed: 3 additions & 3 deletions
@@ -354,16 +354,16 @@ describe("RooHandler", () => {
 })

 describe("temperature and model configuration", () => {
-it("should omit temperature when not explicitly set", async () => {
+it("should use default temperature of 0.7", async () => {
 handler = new RooHandler(mockOptions)
 const stream = handler.createMessage(systemPrompt, messages)
 for await (const _chunk of stream) {
 // Consume stream
 }

 expect(mockCreate).toHaveBeenCalledWith(
-expect.not.objectContaining({
-temperature: expect.anything(),
+expect.objectContaining({
+temperature: 0.7,
 }),
 undefined,
 )
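The swapped matchers look similar but assert opposite things: expect.objectContaining requires the key to be present with the given value, while expect.not.objectContaining passes only when no matching key is there. A minimal standalone illustration using standard Jest/Vitest matchers (the values are taken from the hunk above):

// Passes: the argument object contains temperature: 0.7.
expect({ temperature: 0.7, stream: true }).toEqual(expect.objectContaining({ temperature: 0.7 }))

// Passes only when temperature is absent, which is what the pre-revert test asserted.
expect({ stream: true }).toEqual(expect.not.objectContaining({ temperature: expect.anything() }))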

src/api/providers/__tests__/sambanova.spec.ts

Lines changed: 1 addition & 0 deletions
@@ -144,6 +144,7 @@ describe("SambaNovaHandler", () => {
 expect.objectContaining({
 model: modelId,
 max_tokens: modelInfo.maxTokens,
+temperature: 0.7,
 messages: expect.arrayContaining([{ role: "system", content: systemPrompt }]),
 stream: true,
 stream_options: { include_usage: true },

src/api/providers/__tests__/zai.spec.ts

Lines changed: 1 addition & 0 deletions
@@ -220,6 +220,7 @@ describe("ZAiHandler", () => {
 expect.objectContaining({
 model: modelId,
 max_tokens: modelInfo.maxTokens,
+temperature: ZAI_DEFAULT_TEMPERATURE,
 messages: expect.arrayContaining([{ role: "system", content: systemPrompt }]),
 stream: true,
 stream_options: { include_usage: true },

src/api/providers/base-openai-compatible-provider.ts

Lines changed: 4 additions & 6 deletions
@@ -73,20 +73,18 @@ export abstract class BaseOpenAiCompatibleProvider<ModelName extends string>
 info: { maxTokens: max_tokens },
 } = this.getModel()

+const temperature = this.options.modelTemperature ?? this.defaultTemperature
+
 const params: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
 model,
 max_tokens,
+temperature,
 messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
 stream: true,
 stream_options: { include_usage: true },
 }

-// Only include temperature if explicitly set
-if (this.options.modelTemperature !== undefined) {
-params.temperature = this.options.modelTemperature
-}
-
-return this.client.chat.completions.create(params, requestOptions)
+return this.client.chat.completions.create(params)
 }

 override async *createMessage(
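The restored lines implement a simple pattern: a per-provider default temperature that a user-configured modelTemperature can override. A standalone sketch of that pattern (illustrative class and field names, not the repository's actual BaseOpenAiCompatibleProvider API):

// Illustrative sketch of the default-with-override pattern restored above.
interface CompatibleOptions {
    modelTemperature?: number
}

class ExampleCompatibleProvider {
    constructor(
        private options: CompatibleOptions,
        // Hypothetical per-provider default, analogous to this.defaultTemperature above.
        private defaultTemperature = 0.5,
    ) {}

    buildStreamParams(model: string, maxTokens: number) {
        return {
            model,
            max_tokens: maxTokens,
            // Always present after the revert; the user setting wins, the default otherwise.
            temperature: this.options.modelTemperature ?? this.defaultTemperature,
            stream: true as const,
            stream_options: { include_usage: true },
        }
    }
}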

src/api/providers/openai.ts

Lines changed: 1 addition & 8 deletions
@@ -157,20 +157,13 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl

 const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
 model: modelId,
+temperature: this.options.modelTemperature ?? (deepseekReasoner ? DEEP_SEEK_DEFAULT_TEMPERATURE : 0),
 messages: convertedMessages,
 stream: true as const,
 ...(isGrokXAI ? {} : { stream_options: { include_usage: true } }),
 ...(reasoning && reasoning),
 }

-// Only include temperature if explicitly set
-if (this.options.modelTemperature !== undefined) {
-requestOptions.temperature = this.options.modelTemperature
-} else if (deepseekReasoner) {
-// DeepSeek Reasoner has a specific default temperature
-requestOptions.temperature = DEEP_SEEK_DEFAULT_TEMPERATURE
-}
-
 // Add max_tokens if needed
 this.addMaxTokensIfNeeded(requestOptions, modelInfo)

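The restored one-liner in openai.ts encodes a three-way precedence: an explicitly configured modelTemperature (including 0) wins, deepseek-reasoner models otherwise fall back to DEEP_SEEK_DEFAULT_TEMPERATURE, and everything else defaults to 0. A small standalone sketch (the 0.6 value comes from the removed test's comment; the helper name is illustrative):

// Illustrative helper mirroring the restored expression; not code from this commit.
const DEEP_SEEK_DEFAULT_TEMPERATURE = 0.6

function resolveTemperature(modelTemperature: number | undefined, deepseekReasoner: boolean): number {
    // ?? only falls back on undefined/null, so an explicit 0 is preserved.
    return modelTemperature ?? (deepseekReasoner ? DEEP_SEEK_DEFAULT_TEMPERATURE : 0)
}

// resolveTemperature(undefined, true) === 0.6; resolveTemperature(0, true) === 0; resolveTemperature(undefined, false) === 0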
