Commit a1c1182

Merge pull request #97 from actions/sgoedecke/defensive-parsing
Parse inference response format defensively
2 parents f347eae + dfaa426

File tree

3 files changed: +83 -59 lines changed


dist/index.js

Lines changed: 37 additions & 26 deletions
Some generated files are not rendered by default.

dist/index.js.map

Lines changed: 1 addition & 1 deletion
Some generated files are not rendered by default.

src/inference.ts

Lines changed: 45 additions & 32 deletions
@@ -53,21 +53,10 @@ export async function simpleInference(request: InferenceRequest): Promise<string
     chatCompletionRequest.response_format = request.responseFormat as any
   }

-  try {
-    const response = await client.chat.completions.create(chatCompletionRequest)
-
-    if ('choices' in response) {
-      const modelResponse = response.choices[0]?.message?.content
-      core.info(`Model response: ${modelResponse || 'No response content'}`)
-      return modelResponse || null
-    } else {
-      core.error(`Unexpected response format from API: ${JSON.stringify(response)}`)
-      return null
-    }
-  } catch (error) {
-    core.error(`API error: ${error}`)
-    throw error
-  }
+  const response = await chatCompletion(client, chatCompletionRequest, 'simpleInference')
+  const modelResponse = response.choices[0]?.message?.content
+  core.info(`Model response: ${modelResponse || 'No response content'}`)
+  return modelResponse || null
 }

 /**
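
A side effect of this hunk worth noting: simpleInference used to map an unexpected response shape to a null return, whereas the new chatCompletion wrapper throws instead, so such failures now propagate to the caller. A minimal hedged sketch of a caller adapting to that, assuming simpleInference is awaited from an action entry point (the surrounding names here are illustrative, not from the commit):

try {
  const text = await simpleInference(request)
  core.info(text ?? 'Model returned no content')
} catch (err) {
  // chatCompletion has already logged the failure with its context label
  core.setFailed(`Inference failed: ${err}`)
}
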
@@ -112,11 +101,7 @@ export async function mcpInference(
     }

     try {
-      const response = await client.chat.completions.create(chatCompletionRequest)
-
-      if (!('choices' in response)) {
-        throw new Error(`Unexpected response format from API: ${JSON.stringify(response)}`)
-      }
+      const response = await chatCompletion(client, chatCompletionRequest, `mcpInference iteration ${iterationCount}`)

       const assistantMessage = response.choices[0]?.message
       const modelResponse = assistantMessage?.content
@@ -133,34 +118,22 @@ export async function mcpInference(
       if (!toolCalls || toolCalls.length === 0) {
         core.info('No tool calls requested, ending GitHub MCP inference loop')

-        // If we have a response format set and we haven't explicitly run one final message iteration,
-        // do another loop with the response format set
         if (request.responseFormat && !finalMessage) {
           core.info('Making one more MCP loop with the requested response format...')
-
-          // Add a user message requesting JSON format and try again
           messages.push({
             role: 'user',
             content: `Please provide your response in the exact ${request.responseFormat.type} format specified.`,
           })
-
           finalMessage = true
-
-          // Continue the loop to get a properly formatted response
           continue
         } else {
           return modelResponse || null
         }
       }

       core.info(`Model requested ${toolCalls.length} tool calls`)
-
-      // Execute all tool calls via GitHub MCP
       const toolResults = await executeToolCalls(githubMcpClient.client, toolCalls as ToolCall[])
-
-      // Add tool results to the conversation
       messages.push(...toolResults)
-
       core.info('Tool results added, continuing conversation...')
     } catch (error) {
       core.error(`OpenAI API error: ${error}`)
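
The logic retained in this hunk is a one-shot retry: when the model stops requesting tool calls but a response_format was asked for, the loop pushes a formatting reminder and runs exactly one more iteration, gated by the finalMessage flag. A self-contained sketch of that gate (all names below are stand-ins for illustration, not from the source):

type Turn = { content: string | null; toolCallCount: number }

function runLoop(turns: Turn[], wantsFormat: boolean): string | null {
  let finalMessage = false
  for (const turn of turns) {
    if (turn.toolCallCount === 0) {
      if (wantsFormat && !finalMessage) {
        finalMessage = true // guarantees at most one extra formatting pass
        continue // the next turn would carry the "use the exact format" reminder
      }
      return turn.content
    }
    // tool calls would be executed here and their results appended to the conversation
  }
  return null
}

// With a format requested, the first tool-free turn triggers exactly one retry:
runLoop([{ content: 'draft', toolCallCount: 0 }, { content: '{"ok":true}', toolCallCount: 0 }], true) // => '{"ok":true}'
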
@@ -178,3 +151,43 @@ export async function mcpInference(

   return lastAssistantMessage?.content || null
 }
+
+/**
+ * Wrapper around OpenAI chat.completions.create with defensive handling for cases where
+ * the SDK returns a raw string (e.g., unexpected content-type or streaming body) instead of
+ * a parsed object. Ensures an object with a 'choices' array is returned or throws a descriptive error.
+ */
+async function chatCompletion(
+  client: OpenAI,
+  params: OpenAI.Chat.Completions.ChatCompletionCreateParams,
+  context: string,
+): Promise<OpenAI.Chat.Completions.ChatCompletion> {
+  try {
+    // eslint-disable-next-line @typescript-eslint/no-explicit-any
+    let response: any = await client.chat.completions.create(params)
+    core.debug(`${context}: raw response typeof=${typeof response}`)
+
+    if (typeof response === 'string') {
+      // Attempt to parse if we unexpectedly received a string
+      try {
+        response = JSON.parse(response)
+      } catch (e) {
+        const preview = response.slice(0, 400)
+        throw new Error(
+          `${context}: Chat completion response was a string and not valid JSON (${(e as Error).message}). Preview: ${preview}`,
+        )
+      }
+    }
+
+    if (!response || typeof response !== 'object' || !('choices' in response)) {
+      const preview = JSON.stringify(response)?.slice(0, 800)
+      throw new Error(`${context}: Unexpected response shape (no choices). Preview: ${preview}`)
+    }
+
+    return response as OpenAI.Chat.Completions.ChatCompletion
+  } catch (err) {
+    // Re-throw after logging for upstream handling
+    core.error(`${context}: chatCompletion failed: ${err}`)
+    throw err
+  }
+}
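
To exercise the new defensive path in isolation, here is a minimal standalone sketch (the function name and payload shapes are assumptions for illustration, not part of the commit) that applies the same string-then-shape checks as chatCompletion to mock payloads:

function parseDefensively(raw: unknown, context: string): { choices: unknown[] } {
  let value: unknown = raw
  if (typeof value === 'string') {
    // Some transports hand the body back as text; try to recover a parsed object
    try {
      value = JSON.parse(value)
    } catch (e) {
      throw new Error(`${context}: response was a string and not valid JSON (${(e as Error).message})`)
    }
  }
  if (!value || typeof value !== 'object' || !('choices' in value)) {
    throw new Error(`${context}: unexpected response shape (no choices)`)
  }
  return value as { choices: unknown[] }
}

parseDefensively('{"choices":[]}', 'demo') // string body: parsed, then passes the shape check
parseDefensively({ choices: [] }, 'demo')  // already an object: passes straight through
// parseDefensively('<html>error</html>', 'demo') would throw: a string, but not valid JSON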
