Commit 17b9876

Replace isUsingO1Model with isUsingReasoningModel and remove o1/o3 from default model list
Co-authored-by: PeterDaveHello <[email protected]>
1 parent f4ac0ae commit 17b9876

File tree

3 files changed (+14 −23 lines)

src/config/index.mjs

Lines changed: 0 additions & 4 deletions
@@ -558,10 +558,6 @@ export const defaultConfig = {
     'openRouter_anthropic_claude_sonnet4',
     'openRouter_google_gemini_2_5_pro',
     'openRouter_openai_o3',
-    'chatgptApiO1Preview',
-    'chatgptApiO1Mini',
-    'chatgptApiO3Preview',
-    'chatgptApiO3Mini',
     'chatgptApiO4Mini',
     'chatgptApiGpt5',
     'chatgptApiGpt5Mini',

src/services/apis/openai-api.mjs

Lines changed: 14 additions & 14 deletions
@@ -5,7 +5,7 @@ import { fetchSSE } from '../../utils/fetch-sse.mjs'
 import { getConversationPairs } from '../../utils/get-conversation-pairs.mjs'
 import { isEmpty } from 'lodash-es'
 import { getCompletionPromptBase, pushRecord, setAbortController } from './shared.mjs'
-import { getModelValue, isUsingO1Model } from '../../utils/model-name-convert.mjs'
+import { getModelValue, isUsingReasoningModel } from '../../utils/model-name-convert.mjs'
 
 /**
  * @param {Browser.Runtime.Port} port
@@ -122,16 +122,16 @@ export async function generateAnswersWithChatgptApiCompat(
 ) {
   const { controller, messageListener, disconnectListener } = setAbortController(port)
   const model = getModelValue(session)
-  const isO1Model = isUsingO1Model(session)
+  const isReasoningModel = isUsingReasoningModel(session)
 
   const config = await getUserConfig()
   const prompt = getConversationPairs(
     session.conversationRecords.slice(-config.maxConversationContextLength),
     false,
   )
 
-  // Filter out system messages for o1 models (only user and assistant are allowed)
-  const filteredPrompt = isO1Model
+  // Filter out system messages for reasoning models (only user and assistant are allowed)
+  const filteredPrompt = isReasoningModel
     ? prompt.filter((msg) => msg.role === 'user' || msg.role === 'assistant')
     : prompt
 
@@ -146,26 +146,26 @@ export async function generateAnswersWithChatgptApiCompat(
     port.postMessage({ answer: null, done: true, session: session })
   }
 
-  // Build request body with o1-specific parameters
+  // Build request body with reasoning model-specific parameters
   const requestBody = {
     messages: filteredPrompt,
     model,
     ...extraBody,
   }
 
-  if (isO1Model) {
-    // o1 models use max_completion_tokens instead of max_tokens
+  if (isReasoningModel) {
+    // Reasoning models use max_completion_tokens instead of max_tokens
     requestBody.max_completion_tokens = config.maxResponseTokenLength
-    // o1 models don't support streaming during beta
+    // Reasoning models don't support streaming during beta
     requestBody.stream = false
-    // o1 models have fixed parameters during beta
+    // Reasoning models have fixed parameters during beta
     requestBody.temperature = 1
     requestBody.top_p = 1
     requestBody.n = 1
     requestBody.presence_penalty = 0
     requestBody.frequency_penalty = 0
   } else {
-    // Non-o1 models use the existing behavior
+    // Non-reasoning models use the existing behavior
     requestBody.stream = true
    requestBody.max_tokens = config.maxResponseTokenLength
    requestBody.temperature = config.temperature
@@ -194,11 +194,11 @@ export async function generateAnswersWithChatgptApiCompat(
       return
     }
 
-    if (isO1Model) {
-      // For o1 models (non-streaming), get the complete response
+    if (isReasoningModel) {
+      // For reasoning models (non-streaming), get the complete response
       const choice = data.choices?.[0]
       if (!choice) {
-        console.debug('No choice in response data for o1 model')
+        console.debug('No choice in response data for reasoning model')
         return
       }
       const content = choice.message?.content
@@ -208,7 +208,7 @@ export async function generateAnswersWithChatgptApiCompat(
         finish()
       }
     } else {
-      // For non-o1 models (streaming), handle delta content
+      // For non-reasoning models (streaming), handle delta content
       const choice = data.choices?.[0]
       if (!choice) {
         console.debug('No choice in response data')
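
To see the two parameter sets side by side, here is the request-body branching from the diff above pulled out into a standalone helper. This is a sketch for illustration only; the commit itself builds requestBody inline:

// Sketch: mirrors the branching in generateAnswersWithChatgptApiCompat above.
function buildRequestBody({ isReasoningModel, config, messages, model, extraBody }) {
  const requestBody = { messages, model, ...extraBody }
  if (isReasoningModel) {
    // Reasoning models take max_completion_tokens, disable streaming,
    // and pin the sampling parameters during beta.
    requestBody.max_completion_tokens = config.maxResponseTokenLength
    requestBody.stream = false
    requestBody.temperature = 1
    requestBody.top_p = 1
    requestBody.n = 1
    requestBody.presence_penalty = 0
    requestBody.frequency_penalty = 0
  } else {
    // Other models keep the existing streaming behavior.
    requestBody.stream = true
    requestBody.max_tokens = config.maxResponseTokenLength
    requestBody.temperature = config.temperature
  }
  return requestBody
}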

src/utils/model-name-convert.mjs

Lines changed: 0 additions & 5 deletions
@@ -179,8 +179,3 @@ export function isUsingReasoningModel(configOrSession) {
       modelValue === 'gpt-5-nano')
   )
 }
-
-// Keep backward compatibility
-export function isUsingO1Model(configOrSession) {
-  return isUsingReasoningModel(configOrSession)
-}
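
Because the backward-compatibility alias is deleted outright rather than deprecated, any remaining caller has to be renamed. A minimal migration sketch, assuming a session object as in openai-api.mjs:

import { isUsingReasoningModel } from './model-name-convert.mjs'

// Before this commit:
//   const isO1Model = isUsingO1Model(session)
// After this commit:
const isReasoningModel = isUsingReasoningModel(session)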
