Skip to content

Commit 7b437d9

Browse files
feat(chatcompletions): add handling of reasoning for third party providers (#292)
1 parent 0fe38c0 commit 7b437d9

8 files changed

+178
-6
lines changed

.changeset/three-moles-strive.md

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
---
2+
'@openai/agents-openai': patch
3+
'@openai/agents-core': patch
4+
---
5+
6+
feat: add reasoning handling in chat completions

packages/agents-core/src/types/protocol.ts

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -65,6 +65,16 @@ export const InputText = SharedBase.extend({
6565

6666
export type InputText = z.infer<typeof InputText>;
6767

68+
export const ReasoningText = SharedBase.extend({
69+
type: z.literal('reasoning_text'),
70+
/**
71+
* The raw reasoning text emitted by the model
72+
*/
73+
text: z.string(),
74+
});
75+
76+
export type ReasoningText = z.infer<typeof ReasoningText>;
77+
6878
export const InputImage = SharedBase.extend({
6979
type: z.literal('input_image'),
7080

@@ -452,6 +462,11 @@ export const ReasoningItem = SharedBase.extend({
452462
* The user facing representation of the reasoning. Additional information might be in the `providerData` field.
453463
*/
454464
content: z.array(InputText),
465+
466+
/**
467+
* The raw reasoning text from the model.
468+
*/
469+
rawContent: z.array(ReasoningText).optional(),
455470
});
456471

457472
export type ReasoningItem = z.infer<typeof ReasoningItem>;

packages/agents-openai/src/openaiChatCompletionsConverter.ts

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -182,10 +182,11 @@ export function itemsToMessages(
182182
});
183183
}
184184
} else if (item.type === 'reasoning') {
185-
throw new UserError(
186-
'Reasoning is not supported for chat completions. Got item: ' +
187-
JSON.stringify(item),
188-
);
185+
const asst = ensureAssistantMessage();
186+
// @ts-expect-error - reasoning is not supported in the official Chat Completion API spec
187+
// this is handling third party providers that support reasoning
188+
asst.reasoning = item.rawContent?.[0]?.text;
189+
continue;
189190
} else if (item.type === 'hosted_tool_call') {
190191
if (item.name === 'file_search_call') {
191192
const asst = ensureAssistantMessage();

packages/agents-openai/src/openaiChatCompletionsModel.ts

Lines changed: 30 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,23 @@ import { protocol } from '@openai/agents-core';
3535

3636
export const FAKE_ID = 'FAKE_ID';
3737

38+
// Some Chat Completions API compatible providers return a reasoning property on the message
39+
// If that's the case we handle them separately
40+
type OpenAIMessageWithReasoning =
41+
OpenAI.Chat.Completions.ChatCompletionMessage & {
42+
reasoning: string;
43+
};
44+
45+
function hasReasoningContent(
46+
message: OpenAI.Chat.Completions.ChatCompletionMessage,
47+
): message is OpenAIMessageWithReasoning {
48+
return (
49+
'reasoning' in message &&
50+
typeof message.reasoning === 'string' &&
51+
message.reasoning !== ''
52+
);
53+
}
54+
3855
/**
3956
* A model that uses (or is compatible with) OpenAI's Chat Completions API.
4057
*/
@@ -67,7 +84,19 @@ export class OpenAIChatCompletionsModel implements Model {
6784
const output: protocol.OutputModelItem[] = [];
6885
if (response.choices && response.choices[0]) {
6986
const message = response.choices[0].message;
70-
87+
88+
if (hasReasoningContent(message)) {
89+
output.push({
90+
type: 'reasoning',
91+
content: [],
92+
rawContent: [
93+
{
94+
type: 'reasoning_text',
95+
text: message.reasoning,
96+
},
97+
],
98+
});
99+
}
71100
if (
72101
message.content !== undefined &&
73102
message.content !== null &&

packages/agents-openai/src/openaiChatCompletionsStreaming.ts

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@ type StreamingState = {
99
text_content_index_and_output: [number, protocol.OutputText] | null;
1010
refusal_content_index_and_output: [number, protocol.Refusal] | null;
1111
function_calls: Record<number, protocol.FunctionCallItem>;
12+
reasoning: string;
1213
};
1314

1415
export async function* convertChatCompletionsStreamToResponses(
@@ -21,6 +22,7 @@ export async function* convertChatCompletionsStreamToResponses(
2122
text_content_index_and_output: null,
2223
refusal_content_index_and_output: null,
2324
function_calls: {},
25+
reasoning: '',
2426
};
2527

2628
for await (const chunk of stream) {
@@ -64,6 +66,14 @@ export async function* convertChatCompletionsStreamToResponses(
6466
state.text_content_index_and_output[1].text += delta.content;
6567
}
6668

69+
if (
70+
'reasoning' in delta &&
71+
delta.reasoning &&
72+
typeof delta.reasoning === 'string'
73+
) {
74+
state.reasoning += delta.reasoning;
75+
}
76+
6777
// Handle refusals
6878
if ('refusal' in delta && delta.refusal) {
6979
if (!state.refusal_content_index_and_output) {
@@ -98,6 +108,15 @@ export async function* convertChatCompletionsStreamToResponses(
98108

99109
// Final output message
100110
const outputs: protocol.OutputModelItem[] = [];
111+
112+
if (state.reasoning) {
113+
outputs.push({
114+
type: 'reasoning',
115+
content: [],
116+
rawContent: [{ type: 'reasoning_text', text: state.reasoning }],
117+
});
118+
}
119+
101120
if (
102121
state.text_content_index_and_output ||
103122
state.refusal_content_index_and_output

packages/agents-openai/test/openaiChatCompletionsConverter.test.ts

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -204,6 +204,23 @@ describe('itemsToMessages', () => {
204204
];
205205
expect(() => itemsToMessages(bad)).toThrow(UserError);
206206
});
207+
208+
test('converts reasoning items into assistant reasoning', () => {
209+
const items: protocol.ModelItem[] = [
210+
{
211+
type: 'reasoning',
212+
content: [],
213+
rawContent: [{ type: 'reasoning_text', text: 'why' }],
214+
} as protocol.ReasoningItem,
215+
];
216+
const msgs = itemsToMessages(items);
217+
expect(msgs).toEqual([
218+
{
219+
role: 'assistant',
220+
reasoning: 'why',
221+
},
222+
]);
223+
});
207224
});
208225

209226
describe('tool helpers', () => {

packages/agents-openai/test/openaiChatCompletionsModel.test.ts

Lines changed: 54 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -68,7 +68,13 @@ describe('OpenAIChatCompletionsModel', () => {
6868
type: 'message',
6969
role: 'assistant',
7070
status: 'completed',
71-
content: [{ type: 'output_text', text: 'hi', providerData: {} }],
71+
content: [
72+
{
73+
type: 'output_text',
74+
text: 'hi',
75+
providerData: {},
76+
},
77+
],
7278
},
7379
]);
7480
});
@@ -171,6 +177,53 @@ describe('OpenAIChatCompletionsModel', () => {
171177
]);
172178
});
173179

180+
it('handles reasoning messages from third-party providers', async () => {
181+
const client = new FakeClient();
182+
const response = {
183+
id: 'r',
184+
choices: [
185+
{
186+
message: { reasoning: 'because', content: 'hi' },
187+
},
188+
],
189+
usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 },
190+
} as any;
191+
client.chat.completions.create.mockResolvedValue(response);
192+
193+
const model = new OpenAIChatCompletionsModel(client as any, 'gpt');
194+
const req: any = {
195+
input: 'u',
196+
modelSettings: {},
197+
tools: [],
198+
outputType: 'text',
199+
handoffs: [],
200+
tracing: false,
201+
};
202+
203+
const result = await withTrace('t', () => model.getResponse(req));
204+
205+
expect(result.output).toEqual([
206+
{
207+
type: 'reasoning',
208+
content: [],
209+
rawContent: [{ type: 'reasoning_text', text: 'because' }],
210+
},
211+
{
212+
id: 'r',
213+
type: 'message',
214+
role: 'assistant',
215+
status: 'completed',
216+
content: [
217+
{
218+
type: 'output_text',
219+
text: 'hi',
220+
providerData: { reasoning: 'because' },
221+
},
222+
],
223+
},
224+
]);
225+
});
226+
174227
it('handles function tool calls', async () => {
175228
const client = new FakeClient();
176229
const response = {

packages/agents-openai/test/openaiChatCompletionsStreaming.test.ts

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -233,4 +233,36 @@ describe('convertChatCompletionsStreamToResponses', () => {
233233
expect(deltas).toHaveLength(1);
234234
expect(deltas[0].delta).toBe('hi');
235235
});
236+
237+
it('accumulates reasoning deltas into a reasoning item', async () => {
238+
const resp: ChatCompletion = {
239+
id: 'r1',
240+
created: 0,
241+
model: 'gpt-test',
242+
object: 'chat.completion',
243+
choices: [],
244+
usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 },
245+
} as any;
246+
247+
async function* stream() {
248+
yield makeChunk({ reasoning: 'foo' });
249+
yield makeChunk({ reasoning: 'bar' });
250+
}
251+
252+
const events: any[] = [];
253+
for await (const e of convertChatCompletionsStreamToResponses(
254+
resp,
255+
stream() as any,
256+
)) {
257+
events.push(e);
258+
}
259+
260+
const final = events[events.length - 1];
261+
expect(final.type).toBe('response_done');
262+
expect(final.response.output[0]).toEqual({
263+
type: 'reasoning',
264+
content: [],
265+
rawContent: [{ type: 'reasoning_text', text: 'foobar' }],
266+
});
267+
});
236268
});

0 commit comments

Comments (0)