
Commit 2be919e

Add OpenAI o1 series model support with API compatibility
Co-authored-by: PeterDaveHello <[email protected]>
1 parent: 3c6f522

File tree

3 files changed: +73 -24 lines

  src/config/index.mjs
  src/services/apis/openai-api.mjs
  src/utils/model-name-convert.mjs

src/config/index.mjs

Lines changed: 7 additions & 0 deletions
```diff
@@ -65,6 +65,8 @@ export const chatgptApiModelKeys = [
   'chatgptApi4_1',
   'chatgptApi4_1_mini',
   'chatgptApi4_1_nano',
+  'chatgptApiO1Preview',
+  'chatgptApiO1Mini',
 ]
 export const customApiModelKeys = ['customModel']
 export const ollamaApiModelKeys = ['ollamaModel']
@@ -256,6 +258,9 @@ export const Models = {
   chatgptApi4_1_mini: { value: 'gpt-4.1-mini', desc: 'ChatGPT (GPT-4.1 mini)' },
   chatgptApi4_1_nano: { value: 'gpt-4.1-nano', desc: 'ChatGPT (GPT-4.1 nano)' },
 
+  chatgptApiO1Preview: { value: 'o1-preview', desc: 'ChatGPT (o1-preview)' },
+  chatgptApiO1Mini: { value: 'o1-mini', desc: 'ChatGPT (o1-mini)' },
+
   claude2WebFree: { value: '', desc: 'Claude.ai (Web)' },
   claude12Api: { value: 'claude-instant-1.2', desc: 'Claude.ai (API, Claude Instant 1.2)' },
   claude2Api: { value: 'claude-2.0', desc: 'Claude.ai (API, Claude 2)' },
@@ -541,6 +546,8 @@ export const defaultConfig = {
     'openRouter_anthropic_claude_sonnet4',
     'openRouter_google_gemini_2_5_pro',
     'openRouter_openai_o3',
+    'chatgptApiO1Preview',
+    'chatgptApiO1Mini',
   ],
   customApiModes: [
     {
```
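Taken together, these entries register two new chat-API modes and add them to the default model list. Each `Models` entry pairs an internal mode key with the model identifier sent to the API; a minimal illustration (values from the diff above; the user-facing use of `desc` is an assumption):

```javascript
// Illustrative: Models maps internal mode keys to OpenAI model identifiers.
import { Models } from './src/config/index.mjs'

Models.chatgptApiO1Preview.value // 'o1-preview' (resolved by getModelValue() for requests)
Models.chatgptApiO1Mini.desc // 'ChatGPT (o1-mini)' (presumably the user-facing label)
```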

src/services/apis/openai-api.mjs

Lines changed: 61 additions & 24 deletions
```diff
@@ -5,7 +5,7 @@ import { fetchSSE } from '../../utils/fetch-sse.mjs'
 import { getConversationPairs } from '../../utils/get-conversation-pairs.mjs'
 import { isEmpty } from 'lodash-es'
 import { getCompletionPromptBase, pushRecord, setAbortController } from './shared.mjs'
-import { getModelValue } from '../../utils/model-name-convert.mjs'
+import { getModelValue, isUsingO1Model } from '../../utils/model-name-convert.mjs'
 
 /**
  * @param {Browser.Runtime.Port} port
@@ -116,13 +116,20 @@ export async function generateAnswersWithChatgptApiCompat(
 ) {
   const { controller, messageListener, disconnectListener } = setAbortController(port)
   const model = getModelValue(session)
+  const isO1Model = isUsingO1Model(session)
 
   const config = await getUserConfig()
   const prompt = getConversationPairs(
     session.conversationRecords.slice(-config.maxConversationContextLength),
     false,
   )
-  prompt.push({ role: 'user', content: question })
+
+  // Filter out system messages for o1 models (only user and assistant are allowed)
+  const filteredPrompt = isO1Model
+    ? prompt.filter((msg) => msg.role === 'user' || msg.role === 'assistant')
+    : prompt
+
+  filteredPrompt.push({ role: 'user', content: question })
 
   let answer = ''
   let finished = false
@@ -132,21 +139,40 @@ export async function generateAnswersWithChatgptApiCompat(
     console.debug('conversation history', { content: session.conversationRecords })
     port.postMessage({ answer: null, done: true, session: session })
   }
+
+  // Build request body with o1-specific parameters
+  const requestBody = {
+    messages: filteredPrompt,
+    model,
+    ...extraBody,
+  }
+
+  if (isO1Model) {
+    // o1 models use max_completion_tokens instead of max_tokens
+    requestBody.max_completion_tokens = config.maxResponseTokenLength
+    // o1 models don't support streaming during beta
+    requestBody.stream = false
+    // o1 models have fixed parameters during beta
+    requestBody.temperature = 1
+    requestBody.top_p = 1
+    requestBody.n = 1
+    requestBody.presence_penalty = 0
+    requestBody.frequency_penalty = 0
+  } else {
+    // Non-o1 models use the existing behavior
+    requestBody.stream = true
+    requestBody.max_tokens = config.maxResponseTokenLength
+    requestBody.temperature = config.temperature
+  }
+
   await fetchSSE(`${baseUrl}/chat/completions`, {
     method: 'POST',
     signal: controller.signal,
     headers: {
       'Content-Type': 'application/json',
       Authorization: `Bearer ${apiKey}`,
     },
-    body: JSON.stringify({
-      messages: prompt,
-      model,
-      stream: true,
-      max_tokens: config.maxResponseTokenLength,
-      temperature: config.temperature,
-      ...extraBody,
-    }),
+    body: JSON.stringify(requestBody),
     onMessage(message) {
       console.debug('sse message', message)
       if (finished) return
@@ -162,21 +188,32 @@ export async function generateAnswersWithChatgptApiCompat(
         return
       }
 
-      const delta = data.choices[0]?.delta?.content
-      const content = data.choices[0]?.message?.content
-      const text = data.choices[0]?.text
-      if (delta !== undefined) {
-        answer += delta
-      } else if (content) {
-        answer = content
-      } else if (text) {
-        answer += text
-      }
-      port.postMessage({ answer: answer, done: false, session: null })
+      if (isO1Model) {
+        // For o1 models (non-streaming), get the complete response
+        const content = data.choices[0]?.message?.content
+        if (content) {
+          answer = content
+          port.postMessage({ answer: answer, done: false, session: null })
+          finish()
+        }
+      } else {
+        // For non-o1 models (streaming), handle delta content
+        const delta = data.choices[0]?.delta?.content
+        const content = data.choices[0]?.message?.content
+        const text = data.choices[0]?.text
+        if (delta !== undefined) {
+          answer += delta
+        } else if (content) {
+          answer = content
+        } else if (text) {
+          answer += text
+        }
+        port.postMessage({ answer: answer, done: false, session: null })
 
-      if (data.choices[0]?.finish_reason) {
-        finish()
-        return
+        if (data.choices[0]?.finish_reason) {
+          finish()
+          return
+        }
       }
     },
     async onStart() {},
```
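To make the branching concrete, here is a hedged sketch of the two request bodies the code above produces. The field names and fixed o1 values come straight from the diff; the message contents, the `gpt-4.1-mini` comparison model, and the numeric values standing in for `config.maxResponseTokenLength` and `config.temperature` are placeholder assumptions:

```javascript
// o1 request: non-streaming, fixed sampling parameters, max_completion_tokens.
const o1RequestBody = {
  messages: [{ role: 'user', content: 'Hello' }], // system messages already filtered out
  model: 'o1-preview',
  max_completion_tokens: 1000, // stands in for config.maxResponseTokenLength
  stream: false,
  temperature: 1,
  top_p: 1,
  n: 1,
  presence_penalty: 0,
  frequency_penalty: 0,
}

// Any other chat model: unchanged streaming behavior.
const defaultRequestBody = {
  messages: [
    { role: 'system', content: 'You are a helpful assistant.' },
    { role: 'user', content: 'Hello' },
  ],
  model: 'gpt-4.1-mini',
  stream: true,
  max_tokens: 1000, // stands in for config.maxResponseTokenLength
  temperature: 0.9, // stands in for config.temperature
}
```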

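On the response side, because `stream` is false for o1, the complete answer is expected to arrive through the same `onMessage` callback as a single JSON payload rather than as SSE deltas. A trimmed example of the standard non-streaming chat-completion shape the o1 branch reads (content abbreviated):

```javascript
// Trimmed non-streaming chat-completion payload consumed by the o1 branch:
const data = {
  choices: [
    {
      message: { role: 'assistant', content: 'full answer text' },
      finish_reason: 'stop',
    },
  ],
}
// The handler sets answer = data.choices[0].message.content,
// posts it to the port once, and calls finish().
```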
src/utils/model-name-convert.mjs

Lines changed: 5 additions & 0 deletions
```diff
@@ -164,3 +164,8 @@ export function isInApiModeGroup(apiModeGroup, configOrSession) {
   const [, { value: groupValue }] = foundGroup
   return groupValue === apiModeGroup
 }
+
+export function isUsingO1Model(configOrSession) {
+  const modelValue = getModelValue(configOrSession)
+  return modelValue && (modelValue === 'o1-preview' || modelValue === 'o1-mini')
+}
```
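A quick usage sketch of the new helper; the `{ modelName }` session shape is assumed here for illustration (the function accepts whatever `getModelValue` already resolves), and matching is by exact model value:

```javascript
// Assumed session shape for illustration; the check is an exact-value match.
isUsingO1Model({ modelName: 'chatgptApiO1Preview' }) // true, resolves to 'o1-preview'
isUsingO1Model({ modelName: 'chatgptApi4_1_mini' }) // false, resolves to 'gpt-4.1-mini'
```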
