Commit f7c747c

encourage markdown inclusion for o1/o3
1 parent 48469bd commit f7c747c

1 file changed (+10, −2)

app/client/platforms/openai.ts

Lines changed: 10 additions & 2 deletions
```diff
@@ -56,7 +56,7 @@ export interface OpenAIListModelResponse {
 
 export interface RequestPayload {
   messages: {
-    role: "system" | "user" | "assistant";
+    role: "developer" | "system" | "user" | "assistant";
     content: string | MultimodalContent[];
   }[];
   stream?: boolean;
@@ -237,8 +237,16 @@ export class ChatGPTApi implements LLMApi {
       // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
     };
 
-    // O1 uses max_completion_tokens to control the token count (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
     if (isO1OrO3) {
+      // by default the o1/o3 models will not attempt to produce output that includes markdown formatting
+      // manually add a "Formatting re-enabled" developer message to encourage markdown inclusion in model responses
+      // (https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/reasoning?tabs=python-secure#markdown-output)
+      requestPayload["messages"] = [
+        { role: "developer", content: "Formatting re-enabled" },
+        ...requestPayload["messages"],
+      ];
+
+      // O1 uses max_completion_tokens to control the token count (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
       requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
     }
```
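For readers outside the repo, here is a minimal, self-contained sketch of what this change does to the request payload. `Message`, `buildPayload`, and the inline `modelConfig` parameter are illustrative stand-ins invented for this sketch; only the message shape, the `isO1OrO3` check, the "Formatting re-enabled" developer message, and `max_completion_tokens` come from the diff above.

```ts
// A minimal standalone sketch of the payload transformation this commit makes.
// `Message`, `buildPayload`, and the `modelConfig` parameter are stand-ins for
// values computed elsewhere in openai.ts.

type Role = "developer" | "system" | "user" | "assistant";

interface Message {
  role: Role;
  content: string; // the real RequestPayload also accepts MultimodalContent[]
}

interface RequestPayload {
  messages: Message[];
  stream?: boolean;
  max_completion_tokens?: number;
}

function buildPayload(
  messages: Message[],
  isO1OrO3: boolean,
  modelConfig: { max_tokens: number },
): RequestPayload {
  const requestPayload: RequestPayload = { messages };

  if (isO1OrO3) {
    // Prepend the "Formatting re-enabled" developer message so that o1/o3
    // include markdown formatting in their responses.
    requestPayload.messages = [
      { role: "developer", content: "Formatting re-enabled" },
      ...requestPayload.messages,
    ];
    // Reasoning models take max_completion_tokens instead of max_tokens.
    requestPayload.max_completion_tokens = modelConfig.max_tokens;
  }

  return requestPayload;
}

// The developer message ends up first in the messages array:
// { messages: [{ role: "developer", ... }, { role: "user", content: "hi" }],
//   max_completion_tokens: 1024 }
console.log(buildPayload([{ role: "user", content: "hi" }], true, { max_tokens: 1024 }));
```

Note the use of the `developer` role rather than `system`: the o1/o3 reasoning models expect developer messages where other models take system prompts, which is why the `role` union in `RequestPayload` gains `"developer"` in the first hunk.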
