@@ -6,46 +6,53 @@ export type OpenAiNativeModelId = keyof typeof openAiNativeModels
 export const openAiNativeDefaultModelId: OpenAiNativeModelId = "gpt-5.1"

 export const openAiNativeModels = {
-	"gpt-5.1": {
+	"gpt-5.1-codex-max": {
 		maxTokens: 128000,
 		contextWindow: 400000,
 		supportsNativeTools: true,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
 		supportsPromptCache: true,
 		promptCacheRetention: "24h",
-		supportsReasoningEffort: ["none", "low", "medium", "high"],
+		supportsReasoningEffort: ["low", "medium", "high"],
 		reasoningEffort: "medium",
 		inputPrice: 1.25,
 		outputPrice: 10.0,
 		cacheReadsPrice: 0.125,
-		supportsVerbosity: true,
 		supportsTemperature: false,
-		tiers: [
-			{ name: "flex", contextWindow: 400000, inputPrice: 0.625, outputPrice: 5.0, cacheReadsPrice: 0.0625 },
-			{ name: "priority", contextWindow: 400000, inputPrice: 2.5, outputPrice: 20.0, cacheReadsPrice: 0.25 },
-		],
-		description: "GPT-5.1: The best model for coding and agentic tasks across domains",
+		tiers: [{ name: "priority", contextWindow: 400000, inputPrice: 2.5, outputPrice: 20.0, cacheReadsPrice: 0.25 }],
+		description:
+			"GPT-5.1 Codex Max: Our most intelligent coding model optimized for long-horizon, agentic coding tasks",
 	},
-	"gpt-5.1-codex": {
+	"gpt-5.1": {
 		maxTokens: 128000,
 		contextWindow: 400000,
 		supportsNativeTools: true,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
 		supportsPromptCache: true,
 		promptCacheRetention: "24h",
-		supportsReasoningEffort: ["low", "medium", "high"],
+		supportsReasoningEffort: ["none", "low", "medium", "high"],
 		reasoningEffort: "medium",
 		inputPrice: 1.25,
 		outputPrice: 10.0,
 		cacheReadsPrice: 0.125,
+		supportsVerbosity: true,
 		supportsTemperature: false,
-		tiers: [{ name: "priority", contextWindow: 400000, inputPrice: 2.5, outputPrice: 20.0, cacheReadsPrice: 0.25 }],
-		description: "GPT-5.1 Codex: A version of GPT-5.1 optimized for agentic coding in Codex",
+		tiers: [
+			{ name: "flex", contextWindow: 400000, inputPrice: 0.625, outputPrice: 5.0, cacheReadsPrice: 0.0625 },
+			{ name: "priority", contextWindow: 400000, inputPrice: 2.5, outputPrice: 20.0, cacheReadsPrice: 0.25 },
+		],
+		description: "GPT-5.1: The best model for coding and agentic tasks across domains",
 	},
-	"gpt-5.1-codex-max": {
+	"gpt-5.1-codex": {
 		maxTokens: 128000,
 		contextWindow: 400000,
 		supportsNativeTools: true,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
 		supportsPromptCache: true,
 		promptCacheRetention: "24h",
@@ -56,13 +63,14 @@ export const openAiNativeModels = {
 		cacheReadsPrice: 0.125,
 		supportsTemperature: false,
 		tiers: [{ name: "priority", contextWindow: 400000, inputPrice: 2.5, outputPrice: 20.0, cacheReadsPrice: 0.25 }],
-		description:
-			"GPT-5.1 Codex Max: Our most intelligent coding model optimized for long-horizon, agentic coding tasks",
+		description: "GPT-5.1 Codex: A version of GPT-5.1 optimized for agentic coding in Codex",
 	},
 	"gpt-5.1-codex-mini": {
 		maxTokens: 128000,
 		contextWindow: 400000,
 		supportsNativeTools: true,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
 		supportsPromptCache: true,
 		promptCacheRetention: "24h",
@@ -78,6 +86,8 @@ export const openAiNativeModels = {
 		maxTokens: 128000,
 		contextWindow: 400000,
 		supportsNativeTools: true,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsReasoningEffort: ["minimal", "low", "medium", "high"],
@@ -97,6 +107,8 @@ export const openAiNativeModels = {
 		maxTokens: 128000,
 		contextWindow: 400000,
 		supportsNativeTools: true,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsReasoningEffort: ["minimal", "low", "medium", "high"],
@@ -116,6 +128,8 @@ export const openAiNativeModels = {
 		maxTokens: 128000,
 		contextWindow: 400000,
 		supportsNativeTools: true,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsReasoningEffort: ["low", "medium", "high"],
@@ -131,6 +145,8 @@ export const openAiNativeModels = {
 		maxTokens: 128000,
 		contextWindow: 400000,
 		supportsNativeTools: true,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsReasoningEffort: ["minimal", "low", "medium", "high"],
@@ -147,6 +163,8 @@ export const openAiNativeModels = {
 		maxTokens: 128000,
 		contextWindow: 400000,
 		supportsNativeTools: true,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
 		supportsPromptCache: true,
 		inputPrice: 1.25,
@@ -158,6 +176,8 @@ export const openAiNativeModels = {
 		maxTokens: 32_768,
 		contextWindow: 1_047_576,
 		supportsNativeTools: true,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
 		supportsPromptCache: true,
 		inputPrice: 2,
@@ -172,6 +192,8 @@ export const openAiNativeModels = {
 		maxTokens: 32_768,
 		contextWindow: 1_047_576,
 		supportsNativeTools: true,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
 		supportsPromptCache: true,
 		inputPrice: 0.4,
@@ -186,6 +208,8 @@ export const openAiNativeModels = {
 		maxTokens: 32_768,
 		contextWindow: 1_047_576,
 		supportsNativeTools: true,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
 		supportsPromptCache: true,
 		inputPrice: 0.1,
@@ -394,6 +418,8 @@ export const openAiNativeModels = {
 		maxTokens: 128000,
 		contextWindow: 400000,
 		supportsNativeTools: true,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsReasoningEffort: ["minimal", "low", "medium", "high"],
@@ -413,6 +439,8 @@ export const openAiNativeModels = {
 		maxTokens: 128000,
 		contextWindow: 400000,
 		supportsNativeTools: true,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsReasoningEffort: ["minimal", "low", "medium", "high"],
@@ -432,6 +460,8 @@ export const openAiNativeModels = {
 		maxTokens: 128000,
 		contextWindow: 400000,
 		supportsNativeTools: true,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsReasoningEffort: ["minimal", "low", "medium", "high"],
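
For context on what the new includedTools / excludedTools fields are meant to drive, here is a minimal TypeScript sketch of how a consumer might filter a tool list against one of these model entries. The ModelToolConfig shape and resolveTools helper are hypothetical names used only for illustration; they are not defined in this diff.

```typescript
// Hypothetical sketch (not part of this diff): apply a model's
// includedTools / excludedTools entries to a default tool list.
interface ModelToolConfig {
	includedTools?: string[]
	excludedTools?: string[]
}

function resolveTools(defaultTools: string[], model: ModelToolConfig): string[] {
	// Drop anything the model excludes, then append its explicit inclusions.
	const excluded = new Set(model.excludedTools ?? [])
	const tools = defaultTools.filter((tool) => !excluded.has(tool))
	for (const tool of model.includedTools ?? []) {
		if (!tools.includes(tool)) {
			tools.push(tool)
		}
	}
	return tools
}

// With the gpt-5.1 entry above, "apply_diff" and "write_to_file" are removed
// and "apply_patch" is added:
// resolveTools(["read_file", "apply_diff", "write_to_file"], {
//   includedTools: ["apply_patch"],
//   excludedTools: ["apply_diff", "write_to_file"],
// }) -> ["read_file", "apply_patch"]
```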