
Commit e34e860

Add tests
1 parent 6c44dff commit e34e860

3 files changed: +227 −4 lines changed


packages/agents-core/test/run.test.ts

Lines changed: 135 additions & 4 deletions
@@ -1,4 +1,13 @@
-import { beforeAll, describe, expect, expectTypeOf, it, vi } from 'vitest';
+import {
+  beforeAll,
+  beforeEach,
+  afterEach,
+  describe,
+  expect,
+  expectTypeOf,
+  it,
+  vi,
+} from 'vitest';
 import { z } from 'zod';
 import {
   Agent,
@@ -32,7 +41,7 @@ import {
   TEST_MODEL_RESPONSE_BASIC,
   TEST_TOOL,
 } from './stubs';
-import { Model, ModelRequest } from '../src/model';
+import { Model, ModelProvider, ModelRequest } from '../src/model';

 describe('Runner.run', () => {
   beforeAll(() => {
@@ -148,7 +157,11 @@ describe('Runner.run', () => {

     // Track agent_end events on both the agent and runner
     const agentEndEvents: Array<{ context: any; output: string }> = [];
-    const runnerEndEvents: Array<{ context: any; agent: any; output: string }> = [];
+    const runnerEndEvents: Array<{
+      context: any;
+      agent: any;
+      output: string;
+    }> = [];

     agent.on('agent_end', (context, output) => {
       agentEndEvents.push({ context, output });
@@ -407,7 +420,7 @@ describe('Runner.run', () => {
       usage: new Usage(),
     };
     class SimpleStreamingModel implements Model {
-      constructor(private resps: ModelResponse[]) { }
+      constructor(private resps: ModelResponse[]) {}
       async getResponse(_req: ModelRequest): Promise<ModelResponse> {
         const r = this.resps.shift();
         if (!r) {
@@ -525,6 +538,124 @@ describe('Runner.run', () => {
   });
 });

+describe('gpt-5 default model adjustments', () => {
+  class InspectableModel extends FakeModel {
+    lastRequest: ModelRequest | undefined;
+
+    constructor(response: ModelResponse) {
+      super([response]);
+    }
+
+    override async getResponse(
+      request: ModelRequest,
+    ): Promise<ModelResponse> {
+      this.lastRequest = request;
+      return await super.getResponse(request);
+    }
+  }
+
+  class InspectableModelProvider implements ModelProvider {
+    constructor(private readonly model: Model) {}
+
+    async getModel(_name: string): Promise<Model> {
+      return this.model;
+    }
+  }
+
+  let originalDefaultModel: string | undefined;
+
+  beforeEach(() => {
+    originalDefaultModel = process.env.OPENAI_DEFAULT_MODEL;
+    process.env.OPENAI_DEFAULT_MODEL = 'gpt-5o';
+  });
+
+  afterEach(() => {
+    if (originalDefaultModel === undefined) {
+      delete process.env.OPENAI_DEFAULT_MODEL;
+    } else {
+      process.env.OPENAI_DEFAULT_MODEL = originalDefaultModel;
+    }
+  });
+
+  function createGpt5ModelSettings() {
+    return {
+      temperature: 0.42,
+      providerData: {
+        reasoning: { effort: 'high' },
+        text: { verbosity: 'high' },
+        reasoning_effort: 'medium',
+        keep: 'value',
+      },
+      reasoning: { effort: 'high', summary: 'detailed' },
+      text: { verbosity: 'medium' },
+    };
+  }
+
+  it('strips GPT-5-only settings when the runner model is not a GPT-5 string', async () => {
+    const modelResponse: ModelResponse = {
+      output: [fakeModelMessage('Hello non GPT-5')],
+      usage: new Usage(),
+    };
+    const inspectableModel = new InspectableModel(modelResponse);
+    const agent = new Agent({
+      name: 'NonGpt5Runner',
+      model: inspectableModel,
+      modelSettings: createGpt5ModelSettings(),
+    });
+
+    const runner = new Runner();
+    const result = await runner.run(agent, 'hello');
+
+    expect(result.finalOutput).toBe('Hello non GPT-5');
+    expect(inspectableModel.lastRequest).toBeDefined();
+
+    const requestSettings = inspectableModel.lastRequest!.modelSettings;
+    expect(requestSettings.temperature).toBe(0.42);
+    expect(requestSettings.providerData?.keep).toBe('value');
+    expect(requestSettings.providerData?.reasoning).toBeUndefined();
+    expect(requestSettings.providerData?.text?.verbosity).toBeUndefined();
+    expect(
+      (requestSettings.providerData as any)?.reasoning_effort,
+    ).toBeUndefined();
+    expect(requestSettings.reasoning?.effort).toBeUndefined();
+    expect(requestSettings.reasoning?.summary).toBeUndefined();
+    expect(requestSettings.text?.verbosity).toBeUndefined();
+  });
+
+  it('keeps GPT-5-only settings when the agent relies on the default model', async () => {
+    const modelResponse: ModelResponse = {
+      output: [fakeModelMessage('Hello default GPT-5')],
+      usage: new Usage(),
+    };
+    const inspectableModel = new InspectableModel(modelResponse);
+    const runner = new Runner({
+      modelProvider: new InspectableModelProvider(inspectableModel),
+    });
+
+    const agent = new Agent({
+      name: 'DefaultModelAgent',
+      modelSettings: createGpt5ModelSettings(),
+    });
+
+    const result = await runner.run(agent, 'hello');
+
+    expect(result.finalOutput).toBe('Hello default GPT-5');
+    expect(inspectableModel.lastRequest).toBeDefined();
+
+    const requestSettings = inspectableModel.lastRequest!.modelSettings;
+    expect(requestSettings.providerData?.reasoning).toEqual({
+      effort: 'high',
+    });
+    expect(requestSettings.providerData?.text?.verbosity).toBe('high');
+    expect((requestSettings.providerData as any)?.reasoning_effort).toBe(
+      'medium',
+    );
+    expect(requestSettings.reasoning?.effort).toBe('high');
+    expect(requestSettings.reasoning?.summary).toBe('detailed');
+    expect(requestSettings.text?.verbosity).toBe('medium');
+  });
+});
+
 describe('selectModel', () => {
   const MODEL_A = 'gpt-4o';
   const MODEL_B = 'gpt-4.1-mini';
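
Note on the contract these two tests pin down: with OPENAI_DEFAULT_MODEL set to a GPT-5-style name, GPT-5-only settings survive when the agent relies on that default, but are stripped from the outgoing request when the agent pins a concrete Model instance: top-level reasoning and text disappear, as do providerData.reasoning, providerData.text, and providerData.reasoning_effort, while temperature and unrelated providerData keys such as keep pass through. A minimal sketch of stripping logic consistent with those assertions; the type and helper name are illustrative assumptions, not the package's actual implementation:

// Minimal stand-in for the package's ModelSettings shape (assumed).
type SettingsSketch = {
  temperature?: number;
  reasoning?: { effort?: string; summary?: string };
  text?: { verbosity?: string };
  providerData?: Record<string, unknown>;
};

// Hypothetical helper (not the package's real name): drop GPT-5-only
// knobs before the request reaches a non-GPT-5 model.
function stripGpt5OnlySettings(settings: SettingsSketch): SettingsSketch {
  const { reasoning: _r, text: _t, providerData, ...rest } = settings;
  const cleaned = providerData ? { ...providerData } : undefined;
  if (cleaned) {
    delete cleaned.reasoning; // GPT-5-only reasoning config
    delete cleaned.text; // carries GPT-5-only verbosity
    delete cleaned.reasoning_effort; // flat GPT-5-only alias
  }
  // temperature and unrelated providerData keys (e.g. keep) pass through.
  return { ...rest, providerData: cleaned };
}

Per the second test, such a helper could only run after the runner resolves the agent's model, since the same settings must reach the model untouched when the GPT-5 default name is in play.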

packages/agents-openai/test/openaiChatCompletionsModel.test.ts

Lines changed: 33 additions & 0 deletions
@@ -250,6 +250,39 @@ describe('OpenAIChatCompletionsModel', () => {
     ]);
   });

+  it('merges top-level reasoning and text settings into chat completions request payload', async () => {
+    const client = new FakeClient();
+    const response = {
+      id: 'r',
+      choices: [{ message: { content: 'hi' } }],
+      usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 },
+    } as any;
+    client.chat.completions.create.mockResolvedValue(response);
+
+    const model = new OpenAIChatCompletionsModel(client as any, 'gpt');
+    const req: any = {
+      input: 'u',
+      modelSettings: {
+        reasoning: { effort: 'high' },
+        text: { verbosity: 'medium' },
+        providerData: { customOption: 'keep' },
+      },
+      tools: [],
+      outputType: 'text',
+      handoffs: [],
+      tracing: false,
+    };
+
+    await withTrace('t', () => model.getResponse(req));
+
+    expect(client.chat.completions.create).toHaveBeenCalledTimes(1);
+    const [args, options] = client.chat.completions.create.mock.calls[0];
+    expect(args.reasoning_effort).toBe('high');
+    expect(args.verbosity).toBe('medium');
+    expect(args.customOption).toBe('keep');
+    expect(options).toEqual({ headers: HEADERS, signal: undefined });
+  });
+
   it('handles function tool calls', async () => {
     const client = new FakeClient();
     const response = {
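
This test fixes the Chat Completions mapping: modelSettings.reasoning.effort flattens to the snake_case reasoning_effort field, modelSettings.text.verbosity flattens to a top-level verbosity field, and providerData entries spread into the request body unchanged. A minimal sketch of that flattening; the function name and shape are illustrative assumptions, not the model class's actual code:

// Hypothetical flattening: fold agent-level settings into a flat
// Chat Completions request body.
function toChatCompletionsArgs(modelSettings: {
  reasoning?: { effort?: string };
  text?: { verbosity?: string };
  providerData?: Record<string, unknown>;
}): Record<string, unknown> {
  return {
    reasoning_effort: modelSettings.reasoning?.effort, // flat snake_case
    verbosity: modelSettings.text?.verbosity, // flat, not nested under text
    ...modelSettings.providerData, // e.g. customOption: 'keep'
  };
}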

packages/agents-openai/test/openaiResponsesModel.test.ts

Lines changed: 59 additions & 0 deletions
@@ -74,6 +74,65 @@ describe('OpenAIResponsesModel', () => {
     });
   });

+  it('merges top-level reasoning and text settings into provider data for Responses API', async () => {
+    withTrace('test', async () => {
+      const fakeResponse = {
+        id: 'res-settings',
+        usage: {
+          input_tokens: 0,
+          output_tokens: 0,
+          total_tokens: 0,
+        },
+        output: [],
+      };
+      const createMock = vi.fn().mockResolvedValue(fakeResponse);
+      const fakeClient = {
+        responses: { create: createMock },
+      } as unknown as OpenAI;
+      const model = new OpenAIResponsesModel(fakeClient, 'gpt-settings');
+
+      const request = {
+        systemInstructions: undefined,
+        input: 'hi',
+        modelSettings: {
+          reasoning: { effort: 'medium', summary: 'concise' },
+          text: { verbosity: 'low' },
+          providerData: {
+            reasoning: { summary: 'override', note: 'provider' },
+            text: { tone: 'playful' },
+            customFlag: true,
+          },
+        },
+        tools: [],
+        outputType: 'text',
+        handoffs: [],
+        tracing: false,
+        signal: undefined,
+      };
+
+      await model.getResponse(request as any);
+
+      expect(createMock).toHaveBeenCalledTimes(1);
+      const [args] = createMock.mock.calls[0];
+      expect(args.reasoning).toEqual({
+        effort: 'medium',
+        summary: 'override',
+        note: 'provider',
+      });
+      expect(args.text).toEqual({ verbosity: 'low', tone: 'playful' });
+      expect(args.customFlag).toBe(true);
+
+      // ensure original provider data object was not mutated
+      expect(request.modelSettings.providerData.reasoning).toEqual({
+        summary: 'override',
+        note: 'provider',
+      });
+      expect(request.modelSettings.providerData.text).toEqual({
+        tone: 'playful',
+      });
+    });
+  });
+
   it('getStreamedResponse yields events and calls client with stream flag', async () => {
     withTrace('test', async () => {
       const fakeResponse = { id: 'res2', usage: {}, output: [] };
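
For the Responses API the settings stay nested, and the assertions add two constraints: providerData.reasoning and providerData.text merge over the top-level reasoning and text objects key by key (provider values win, e.g. summary: 'override'), and the caller's providerData object must not be mutated. A minimal sketch consistent with those assertions; the function name and shape are illustrative assumptions:

// Hypothetical merge: providerData overrides the top-level settings per
// key; spread copies leave the caller's objects unmutated.
function toResponsesArgs(modelSettings: {
  reasoning?: Record<string, unknown>;
  text?: Record<string, unknown>;
  providerData?: {
    reasoning?: Record<string, unknown>;
    text?: Record<string, unknown>;
    [key: string]: unknown;
  };
}): Record<string, unknown> {
  const { reasoning, text, ...rest } = modelSettings.providerData ?? {};
  return {
    ...rest, // e.g. customFlag: true passes through
    reasoning: { ...modelSettings.reasoning, ...reasoning },
    text: { ...modelSettings.text, ...text },
  };
}

Fed the request above, this yields reasoning { effort: 'medium', summary: 'override', note: 'provider' } and text { verbosity: 'low', tone: 'playful' }, matching the createMock assertions.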
