@@ -5,7 +5,7 @@ import { fetchSSE } from '../../utils/fetch-sse.mjs'
55import { getConversationPairs } from '../../utils/get-conversation-pairs.mjs'
66import { isEmpty } from 'lodash-es'
77import { getCompletionPromptBase , pushRecord , setAbortController } from './shared.mjs'
8- import { getModelValue } from '../../utils/model-name-convert.mjs'
8+ import { getModelValue , isUsingO1Model } from '../../utils/model-name-convert.mjs'
99
1010/**
1111 * @param {Browser.Runtime.Port } port
@@ -116,13 +116,20 @@ export async function generateAnswersWithChatgptApiCompat(
116116) {
117117 const { controller, messageListener, disconnectListener } = setAbortController ( port )
118118 const model = getModelValue ( session )
119+ const isO1Model = isUsingO1Model ( session )
119120
120121 const config = await getUserConfig ( )
121122 const prompt = getConversationPairs (
122123 session . conversationRecords . slice ( - config . maxConversationContextLength ) ,
123124 false ,
124125 )
125- prompt . push ( { role : 'user' , content : question } )
126+
127+ // Filter out system messages for o1 models (only user and assistant are allowed)
128+ const filteredPrompt = isO1Model
129+ ? prompt . filter ( ( msg ) => msg . role === 'user' || msg . role === 'assistant' )
130+ : prompt
131+
132+ filteredPrompt . push ( { role : 'user' , content : question } )
126133
127134 let answer = ''
128135 let finished = false
@@ -132,21 +139,40 @@ export async function generateAnswersWithChatgptApiCompat(
132139 console . debug ( 'conversation history' , { content : session . conversationRecords } )
133140 port . postMessage ( { answer : null , done : true , session : session } )
134141 }
142+
143+ // Build request body with o1-specific parameters
144+ const requestBody = {
145+ messages : filteredPrompt ,
146+ model,
147+ ...extraBody ,
148+ }
149+
150+ if ( isO1Model ) {
151+ // o1 models use max_completion_tokens instead of max_tokens
152+ requestBody . max_completion_tokens = config . maxResponseTokenLength
153+ // o1 models don't support streaming during beta
154+ requestBody . stream = false
155+ // o1 models have fixed parameters during beta
156+ requestBody . temperature = 1
157+ requestBody . top_p = 1
158+ requestBody . n = 1
159+ requestBody . presence_penalty = 0
160+ requestBody . frequency_penalty = 0
161+ } else {
162+ // Non-o1 models use the existing behavior
163+ requestBody . stream = true
164+ requestBody . max_tokens = config . maxResponseTokenLength
165+ requestBody . temperature = config . temperature
166+ }
167+
135168 await fetchSSE ( `${ baseUrl } /chat/completions` , {
136169 method : 'POST' ,
137170 signal : controller . signal ,
138171 headers : {
139172 'Content-Type' : 'application/json' ,
140173 Authorization : `Bearer ${ apiKey } ` ,
141174 } ,
142- body : JSON . stringify ( {
143- messages : prompt ,
144- model,
145- stream : true ,
146- max_tokens : config . maxResponseTokenLength ,
147- temperature : config . temperature ,
148- ...extraBody ,
149- } ) ,
175+ body : JSON . stringify ( requestBody ) ,
150176 onMessage ( message ) {
151177 console . debug ( 'sse message' , message )
152178 if ( finished ) return
@@ -162,21 +188,32 @@ export async function generateAnswersWithChatgptApiCompat(
162188 return
163189 }
164190
165- const delta = data . choices [ 0 ] ?. delta ?. content
166- const content = data . choices [ 0 ] ?. message ?. content
167- const text = data . choices [ 0 ] ?. text
168- if ( delta !== undefined ) {
169- answer += delta
170- } else if ( content ) {
171- answer = content
172- } else if ( text ) {
173- answer += text
174- }
175- port . postMessage ( { answer : answer , done : false , session : null } )
191+ if ( isO1Model ) {
192+ // For o1 models (non-streaming), get the complete response
193+ const content = data . choices [ 0 ] ?. message ?. content
194+ if ( content ) {
195+ answer = content
196+ port . postMessage ( { answer : answer , done : false , session : null } )
197+ finish ( )
198+ }
199+ } else {
200+ // For non-o1 models (streaming), handle delta content
201+ const delta = data . choices [ 0 ] ?. delta ?. content
202+ const content = data . choices [ 0 ] ?. message ?. content
203+ const text = data . choices [ 0 ] ?. text
204+ if ( delta !== undefined ) {
205+ answer += delta
206+ } else if ( content ) {
207+ answer = content
208+ } else if ( text ) {
209+ answer += text
210+ }
211+ port . postMessage ( { answer : answer , done : false , session : null } )
176212
177- if ( data . choices [ 0 ] ?. finish_reason ) {
178- finish ( )
179- return
213+ if ( data . choices [ 0 ] ?. finish_reason ) {
214+ finish ( )
215+ return
216+ }
180217 }
181218 } ,
182219 async onStart ( ) { } ,