@@ -5,7 +5,7 @@ import { fetchSSE } from '../../utils/fetch-sse.mjs'
 import { getConversationPairs } from '../../utils/get-conversation-pairs.mjs'
 import { isEmpty } from 'lodash-es'
 import { getCompletionPromptBase, pushRecord, setAbortController } from './shared.mjs'
-import { getModelValue, isUsingO1Model } from '../../utils/model-name-convert.mjs'
+import { getModelValue, isUsingReasoningModel } from '../../utils/model-name-convert.mjs'

 /**
  * @param {Browser.Runtime.Port} port
@@ -122,16 +122,16 @@ export async function generateAnswersWithChatgptApiCompat(
 )
   const { controller, messageListener, disconnectListener } = setAbortController(port)
   const model = getModelValue(session)
-  const isO1Model = isUsingO1Model(session)
+  const isReasoningModel = isUsingReasoningModel(session)

   const config = await getUserConfig()
   const prompt = getConversationPairs(
     session.conversationRecords.slice(-config.maxConversationContextLength),
     false,
   )

-  // Filter out system messages for o1 models (only user and assistant are allowed)
-  const filteredPrompt = isO1Model
+  // Filter out system messages for reasoning models (only user and assistant are allowed)
+  const filteredPrompt = isReasoningModel
     ? prompt.filter((msg) => msg.role === 'user' || msg.role === 'assistant')
     : prompt

@@ -146,26 +146,26 @@ export async function generateAnswersWithChatgptApiCompat(
     port.postMessage({ answer: null, done: true, session: session })
   }

-  // Build request body with o1-specific parameters
+  // Build request body with reasoning-model-specific parameters
   const requestBody = {
     messages: filteredPrompt,
     model,
     ...extraBody,
   }

-  if (isO1Model) {
-    // o1 models use max_completion_tokens instead of max_tokens
+  if (isReasoningModel) {
+    // Reasoning models use max_completion_tokens instead of max_tokens
     requestBody.max_completion_tokens = config.maxResponseTokenLength
-    // o1 models don't support streaming during beta
+    // Reasoning models don't support streaming during beta
     requestBody.stream = false
-    // o1 models have fixed parameters during beta
+    // Reasoning models have fixed parameters during beta
     requestBody.temperature = 1
     requestBody.top_p = 1
     requestBody.n = 1
     requestBody.presence_penalty = 0
     requestBody.frequency_penalty = 0
   } else {
-    // Non-o1 models use the existing behavior
+    // Non-reasoning models use the existing behavior
     requestBody.stream = true
     requestBody.max_tokens = config.maxResponseTokenLength
     requestBody.temperature = config.temperature
@@ -194,11 +194,11 @@ export async function generateAnswersWithChatgptApiCompat(
         return
       }

-      if (isO1Model) {
-        // For o1 models (non-streaming), get the complete response
+      if (isReasoningModel) {
+        // For reasoning models (non-streaming), get the complete response
         const choice = data.choices?.[0]
         if (!choice) {
-          console.debug('No choice in response data for o1 model')
+          console.debug('No choice in response data for reasoning model')
           return
         }
         const content = choice.message?.content
@@ -208,7 +208,7 @@ export async function generateAnswersWithChatgptApiCompat(
           finish()
         }
       } else {
-        // For non-o1 models (streaming), handle delta content
+        // For non-reasoning models (streaming), handle delta content
         const choice = data.choices?.[0]
         if (!choice) {
           console.debug('No choice in response data')
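
The renamed helper is imported from model-name-convert.mjs, which this diff does not touch. A minimal sketch of what isUsingReasoningModel might look like, assuming reasoning models are detected by an o1/o3-style prefix on the resolved model id (the prefix list and the session.modelName lookup are illustrative assumptions, not the repository's actual code):

// Hypothetical sketch -- the real helper in model-name-convert.mjs is not shown in this diff.
// Assumed: the session object carries a modelName that resolves to an API id like 'o1-mini'.
const reasoningModelPrefixes = ['o1', 'o3']

export function isUsingReasoningModel(session) {
  const model = session?.modelName ?? ''
  return reasoningModelPrefixes.some(
    (prefix) => model === prefix || model.startsWith(`${prefix}-`),
  )
}

Keying the check on a prefix list rather than an exact-match table is one way to keep future reasoning-model variants covered without another rename like this one.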
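For reference, these are the two request shapes the branch above produces, with illustrative values standing in for the user's actual config:

// Reasoning-model path: non-streaming, pinned sampling parameters, no system message
const reasoningRequestBody = {
  messages: [{ role: 'user', content: 'Hello' }], // system messages filtered out earlier
  model: 'o1-mini',
  max_completion_tokens: 1000,
  stream: false,
  temperature: 1,
  top_p: 1,
  n: 1,
  presence_penalty: 0,
  frequency_penalty: 0,
}

// Non-reasoning path: streaming, with the user's configured limits
const standardRequestBody = {
  messages: [
    { role: 'system', content: 'You are a helpful assistant.' },
    { role: 'user', content: 'Hello' },
  ],
  model: 'gpt-4o',
  max_tokens: 1000,
  stream: true,
  temperature: 0.7,
}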