Commit c261ebc

use unshift to improve perf
1 parent f7c747c commit c261ebc

File tree

1 file changed: +5 −5 lines changed

app/client/platforms/openai.ts

Lines changed: 5 additions & 5 deletions
@@ -241,12 +241,12 @@ export class ChatGPTApi implements LLMApi {
       // by default the o1/o3 models will not attempt to produce output that includes markdown formatting
       // manually add "Formatting re-enabled" developer message to encourage markdown inclusion in model responses
       // (https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/reasoning?tabs=python-secure#markdown-output)
-      requestPayload["messages"] = [
-        { role: "developer", content: "Formatting re-enabled" },
-        ...requestPayload["messages"],
-      ];
+      requestPayload["messages"].unshift({
+        role: "developer",
+        content: "Formatting re-enabled",
+      });

-      // O1 使用 max_completion_tokens 控制token数 (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
+      // o1/o3 uses max_completion_tokens to control the number of tokens (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
       requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
     }

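For context, here is a minimal standalone sketch of the difference between the two approaches, using a hypothetical ChatMessage type for illustration rather than the project's actual message types. The old code allocated a fresh array with the spread operator; unshift prepends the developer message to the existing array in place.

// Hypothetical message shape, for illustration only (not the repo's actual type).
interface ChatMessage {
  role: "developer" | "system" | "user" | "assistant";
  content: string;
}

const messages: ChatMessage[] = [
  { role: "user", content: "Summarize this document as a markdown list." },
];

// Before: build a brand-new array, copying every existing message into it.
const prependedCopy: ChatMessage[] = [
  { role: "developer", content: "Formatting re-enabled" },
  ...messages,
];

// After: prepend in place; the existing array is mutated and no copy is made.
messages.unshift({ role: "developer", content: "Formatting re-enabled" });

console.log(prependedCopy[0].role === messages[0].role); // true: same resulting order

Both forms yield the same message order; the unshift version avoids allocating and copying an intermediate array, which is a small win for typical chat histories but keeps the payload construction slightly leaner.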