Skip to content

Commit d3939c9

Browse files
Update openai.ts search workaround v2
1 parent 532b8f4 commit d3939c9

File tree

1 file changed

+18
-1
lines changed

1 file changed

+18
-1
lines changed

app/client/platforms/openai.ts

Lines changed: 18 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -225,6 +225,23 @@ export class ChatGPTApi implements LLMApi {
225225
}
226226

227227
// O1 does not support images, tools (plugins in ChatGPTNextWeb), system messages, stream, logprobs, temperature, top_p, n, presence_penalty, or frequency_penalty yet.
228+
229+
const isNewModel = options.config.model.endsWith("-search-preview");
230+
231+
requestPayload = {
232+
messages,
233+
stream: options.config.stream,
234+
model: modelConfig.model,
235+
...(isNewModel
236+
? {} // -search-preview models do not accept sampling parameters (temperature, top_p, penalties); omit them
237+
: {
238+
temperature: !isO1OrO3 ? modelConfig.temperature : 1,
239+
presence_penalty: !isO1OrO3 ? modelConfig.presence_penalty : 0,
240+
frequency_penalty: !isO1OrO3 ? modelConfig.frequency_penalty : 0,
241+
top_p: !isO1OrO3 ? modelConfig.top_p : 1,
242+
}),
243+
};
244+
/*
228245
requestPayload = {
229246
messages,
230247
stream: options.config.stream,
@@ -236,7 +253,7 @@ export class ChatGPTApi implements LLMApi {
236253
// max_tokens: Math.max(modelConfig.max_tokens, 1024),
237254
// Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
238255
};
239-
256+
*/
240257
// O1 uses max_completion_tokens to control the token count (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
241258
if (isO1OrO3) {
242259
requestPayload["max_completion_tokens"] = modelConfig.max_tokens;

0 commit comments

Comments
 (0)