Skip to content

Commit a34f506

Browse files
authored
feat: add context management model settings (#1233)
1 parent 652c2f2 commit a34f506

5 files changed

Lines changed: 81 additions & 0 deletions

File tree

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
---
2+
'@openai/agents-core': patch
3+
'@openai/agents-openai': patch
4+
---
5+
6+
feat: add model settings support for context management

packages/agents-core/src/index.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -147,6 +147,7 @@ export {
147147
ModelRequest,
148148
ModelResponse,
149149
ModelSettings,
150+
ModelSettingsContextManagement,
150151
ModelSettingsToolChoice,
151152
RetryDecision,
152153
RetryPolicy,

packages/agents-core/src/model.ts

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -67,6 +67,25 @@ export interface ModelSettingsText {
6767
verbosity?: 'low' | 'medium' | 'high' | null;
6868
}
6969

70+
export type ModelSettingsContextManagement = Array<{
71+
/**
72+
* The context-management strategy to apply.
73+
*/
74+
type: 'compaction' | (string & {});
75+
76+
/**
77+
* Rendered-token threshold that triggers server-side compaction.
78+
*/
79+
compactThreshold?: number;
80+
81+
/**
82+
* Rendered-token threshold that triggers server-side compaction.
83+
*/
84+
compact_threshold?: number;
85+
86+
[key: string]: unknown;
87+
}>;
88+
7089
export type RetryDecision =
7190
| boolean
7291
| {
@@ -281,6 +300,13 @@ export type ModelSettings = {
281300
*/
282301
promptCacheRetention?: 'in-memory' | '24h' | null;
283302

303+
/**
304+
* Context-management strategies to apply when calling the model.
305+
* This setting is available on OpenAI Responses API requests; strategies include server-side compaction.
306+
* See https://developers.openai.com/api/docs/guides/compaction.
307+
*/
308+
contextManagement?: ModelSettingsContextManagement;
309+
284310
/**
285311
* The reasoning settings to use when calling the model.
286312
*/

packages/agents-openai/src/openaiResponsesModel.ts

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@ import type {
1616
SerializedTool,
1717
ModelRequest,
1818
ModelResponse,
19+
ModelSettingsContextManagement,
1920
ModelSettingsToolChoice,
2021
ResponseStreamEvent,
2122
SerializedOutputType,
@@ -700,6 +701,16 @@ function getResponseFormat(
700701
};
701702
}
702703

704+
function getContextManagement(
705+
contextManagement: ModelSettingsContextManagement | undefined,
706+
): unknown {
707+
if (!contextManagement) {
708+
return undefined;
709+
}
710+
711+
return contextManagement.map((entry) => camelOrSnakeToSnakeCase(entry));
712+
}
713+
703714
function normalizeFunctionCallOutputForRequest(
704715
output: protocol.FunctionCallResultItem['output'],
705716
): string | ResponseFunctionCallOutputListItem[] {
@@ -3026,6 +3037,9 @@ export class OpenAIResponsesModel implements Model {
30263037
text: responseFormat,
30273038
store: request.modelSettings.store,
30283039
prompt_cache_retention: request.modelSettings.promptCacheRetention,
3040+
context_management: getContextManagement(
3041+
request.modelSettings.contextManagement,
3042+
),
30293043
...restOfProviderData,
30303044
};
30313045

packages/agents-openai/test/openaiResponsesModel.test.ts

Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1083,6 +1083,40 @@ describe('OpenAIResponsesModel', () => {
10831083
});
10841084
});
10851085

1086+
it('sends context management settings to the Responses API', async () => {
1087+
await withTrace('test', async () => {
1088+
const fakeResponse = { id: 'res-context', usage: {}, output: [] };
1089+
const createMock = vi.fn().mockResolvedValue(fakeResponse);
1090+
const fakeClient = {
1091+
responses: { create: createMock },
1092+
} as unknown as OpenAI;
1093+
const model = new OpenAIResponsesModel(fakeClient, 'gpt-context');
1094+
1095+
const request = {
1096+
systemInstructions: undefined,
1097+
input: 'hello',
1098+
modelSettings: {
1099+
contextManagement: [{ type: 'compaction', compactThreshold: 200000 }],
1100+
},
1101+
tools: [],
1102+
outputType: 'text',
1103+
handoffs: [],
1104+
tracing: false,
1105+
signal: undefined,
1106+
};
1107+
1108+
await model.getResponse(request as any);
1109+
1110+
const [args] = createMock.mock.calls[0];
1111+
expect(args.context_management).toEqual([
1112+
{
1113+
type: 'compaction',
1114+
compact_threshold: 200000,
1115+
},
1116+
]);
1117+
});
1118+
});
1119+
10861120
it('still sends an empty tools array when no prompt is provided', async () => {
10871121
await withTrace('test', async () => {
10881122
const fakeResponse = { id: 'res-no-prompt', usage: {}, output: [] };

0 commit comments

Comments
 (0)