Skip to content

Commit 96cbd20

Browse files
Address automated code review feedback: improve error handling, validation, and code robustness
Co-authored-by: PeterDaveHello <[email protected]>
1 parent f878898 commit 96cbd20

File tree

2 files changed

+39
-19
lines changed

2 files changed

+39
-19
lines changed

src/services/apis/openai-api.mjs

Lines changed: 28 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@ function extractContentFromArray(contentArray) {
3333

3434
return parts.join('')
3535
} catch (error) {
36-
console.debug('Error extracting content from array:', error)
36+
console.error('Error extracting content from array:', error)
3737
return ''
3838
}
3939
}
@@ -162,8 +162,12 @@ export async function generateAnswersWithChatgptApiCompat(
162162
)
163163

164164
// Filter messages based on model type
165+
// Reasoning models only support 'user' and 'assistant' roles during beta period
165166
const filteredPrompt = isReasoningModel
166-
? prompt.filter((msg) => msg.role === 'user' || msg.role === 'assistant')
167+
? prompt.filter((msg) => {
168+
const role = msg?.role
169+
return role === 'user' || role === 'assistant'
170+
})
167171
: prompt
168172

169173
filteredPrompt.push({ role: 'user', content: question })
@@ -185,6 +189,7 @@ export async function generateAnswersWithChatgptApiCompat(
185189
...extraBody,
186190
}
187191

192+
// Apply model-specific configurations
188193
if (isReasoningModel) {
189194
// Reasoning models use max_completion_tokens instead of max_tokens
190195
requestBody.max_completion_tokens = config.maxResponseTokenLength
@@ -196,21 +201,24 @@ export async function generateAnswersWithChatgptApiCompat(
196201
requestBody.n = 1
197202
requestBody.presence_penalty = 0
198203
requestBody.frequency_penalty = 0
199-
// Disallow tools/functions/function calling in reasoning mode
204+
// Remove unsupported parameters for reasoning models
200205
delete requestBody.tools
201206
delete requestBody.tool_choice
202207
delete requestBody.functions
203208
delete requestBody.function_call
209+
delete requestBody.max_tokens // Ensure max_tokens is not present
204210
} else {
205211
// Non-reasoning models use the existing behavior
206212
requestBody.stream = true
207213
requestBody.max_tokens = config.maxResponseTokenLength
208214
requestBody.temperature = config.temperature
209215
}
210216

211-
// Validate API key
217+
// Validate API key with detailed error message
212218
if (!apiKey || typeof apiKey !== 'string' || !apiKey.trim()) {
213-
throw new Error('Invalid API key provided')
219+
throw new Error(
220+
'Invalid or empty API key provided. Please check your OpenAI API key configuration.',
221+
)
214222
}
215223

216224
await fetchSSE(`${baseUrl}/chat/completions`, {
@@ -236,14 +244,15 @@ export async function generateAnswersWithChatgptApiCompat(
236244
return
237245
}
238246

247+
// Validate response structure early
248+
const choice = data.choices?.[0]
249+
if (!choice) {
250+
console.debug('No choice in response data')
251+
return
252+
}
253+
239254
if (isReasoningModel) {
240255
// For reasoning models (non-streaming), get the complete response
241-
const choice = data.choices?.[0]
242-
if (!choice) {
243-
console.debug('No choice in response data for reasoning model')
244-
return
245-
}
246-
247256
let content = choice.message?.content ?? choice.text
248257

249258
// Handle structured response arrays for reasoning models
@@ -258,6 +267,14 @@ export async function generateAnswersWithChatgptApiCompat(
258267
answer = trimmedContent
259268
port.postMessage({ answer, done: false, session: null })
260269
}
270+
} else if (content) {
271+
// Handle unexpected content types gracefully
272+
console.debug('Unexpected content type for reasoning model:', typeof content)
273+
const stringContent = String(content).trim()
274+
if (stringContent) {
275+
answer = stringContent
276+
port.postMessage({ answer, done: false, session: null })
277+
}
261278
}
262279

263280
// Only finish when we have a proper finish reason
@@ -266,11 +283,6 @@ export async function generateAnswersWithChatgptApiCompat(
266283
}
267284
} else {
268285
// For non-reasoning models (streaming), handle delta content
269-
const choice = data.choices?.[0]
270-
if (!choice) {
271-
console.debug('No choice in response data')
272-
return
273-
}
274286
const delta = choice.delta?.content
275287
const content = choice.message?.content
276288
const text = choice.text

src/utils/model-name-convert.mjs

Lines changed: 11 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -167,19 +167,27 @@ export function isInApiModeGroup(apiModeGroup, configOrSession) {
167167

168168
export function isUsingReasoningModel(configOrSession) {
169169
const modelValue = getModelValue(configOrSession)
170-
if (!modelValue) return false
170+
if (!modelValue || typeof modelValue !== 'string') return false
171+
172+
// Normalize model value to handle potential whitespace
173+
const normalizedModelValue = modelValue.trim().toLowerCase()
171174

172175
// Match o1, o3, or o4 models with optional standard OpenAI suffixes
176+
// Uses word boundaries to prevent false positives like o10, o30, o40
173177
// Allows: o1, o1-preview, o1-mini, o3, o3-mini, o4, o4-mini, etc.
174178
// Prevents: o10, o30, o40, o1x, o3x, o4x, and other invalid patterns
175-
if (/^o[134](?:$|-(?:preview|mini|turbo|instruct|nano|small|medium|large))$/.test(modelValue)) {
179+
if (
180+
/^o[134](?:$|-(?:preview|mini|turbo|instruct|nano|small|medium|large))$/.test(
181+
normalizedModelValue,
182+
)
183+
) {
176184
return true
177185
}
178186

179187
// Match gpt-5* pattern but exclude gpt-5-chat-* variants
180188
// Allows: gpt-5, gpt-5-mini, gpt-5-nano, gpt-5-preview, gpt-5-turbo
181189
// Prevents: gpt-5-chat-latest, gpt-5-chat, etc.
182-
if (modelValue.startsWith('gpt-5') && !modelValue.startsWith('gpt-5-chat')) {
190+
if (normalizedModelValue.startsWith('gpt-5') && !normalizedModelValue.startsWith('gpt-5-chat')) {
183191
return true
184192
}
185193

0 commit comments

Comments (0)