Skip to content

Commit 57eccb5

Browse files
committed
feat: add reasoning effort support for all OpenAI models
1 parent b99eb41 commit 57eccb5

File tree

2 files changed

+22
-0
lines changed

packages/types/src/providers/openai.ts

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@ export const openAiNativeModels = {
1111
contextWindow: 256000,
1212
supportsImages: true,
1313
supportsPromptCache: true,
14+
supportsReasoningEffort: true,
1415
inputPrice: 1.25,
1516
outputPrice: 10.0,
1617
cacheReadsPrice: 0.125,
@@ -20,6 +21,7 @@ export const openAiNativeModels = {
2021
contextWindow: 256000,
2122
supportsImages: true,
2223
supportsPromptCache: true,
24+
supportsReasoningEffort: true,
2325
inputPrice: 0.25,
2426
outputPrice: 2.0,
2527
cacheReadsPrice: 0.025,
@@ -29,6 +31,7 @@ export const openAiNativeModels = {
2931
contextWindow: 256000,
3032
supportsImages: true,
3133
supportsPromptCache: true,
34+
supportsReasoningEffort: true,
3235
inputPrice: 0.05,
3336
outputPrice: 0.4,
3437
cacheReadsPrice: 0.005,
@@ -38,6 +41,7 @@ export const openAiNativeModels = {
3841
contextWindow: 256000,
3942
supportsImages: true,
4043
supportsPromptCache: true,
44+
supportsReasoningEffort: true,
4145
inputPrice: 0,
4246
outputPrice: 0,
4347
cacheReadsPrice: 0,
@@ -47,6 +51,7 @@ export const openAiNativeModels = {
4751
contextWindow: 1_047_576,
4852
supportsImages: true,
4953
supportsPromptCache: true,
54+
supportsReasoningEffort: true,
5055
inputPrice: 2,
5156
outputPrice: 8,
5257
cacheReadsPrice: 0.5,
@@ -56,6 +61,7 @@ export const openAiNativeModels = {
5661
contextWindow: 1_047_576,
5762
supportsImages: true,
5863
supportsPromptCache: true,
64+
supportsReasoningEffort: true,
5965
inputPrice: 0.4,
6066
outputPrice: 1.6,
6167
cacheReadsPrice: 0.1,
@@ -65,6 +71,7 @@ export const openAiNativeModels = {
6571
contextWindow: 1_047_576,
6672
supportsImages: true,
6773
supportsPromptCache: true,
74+
supportsReasoningEffort: true,
6875
inputPrice: 0.1,
6976
outputPrice: 0.4,
7077
cacheReadsPrice: 0.025,
@@ -167,6 +174,7 @@ export const openAiNativeModels = {
167174
contextWindow: 200_000,
168175
supportsImages: true,
169176
supportsPromptCache: true,
177+
supportsReasoningEffort: true,
170178
inputPrice: 15,
171179
outputPrice: 60,
172180
cacheReadsPrice: 7.5,
@@ -176,6 +184,7 @@ export const openAiNativeModels = {
176184
contextWindow: 128_000,
177185
supportsImages: true,
178186
supportsPromptCache: true,
187+
supportsReasoningEffort: true,
179188
inputPrice: 15,
180189
outputPrice: 60,
181190
cacheReadsPrice: 7.5,
@@ -185,6 +194,7 @@ export const openAiNativeModels = {
185194
contextWindow: 128_000,
186195
supportsImages: true,
187196
supportsPromptCache: true,
197+
supportsReasoningEffort: true,
188198
inputPrice: 1.1,
189199
outputPrice: 4.4,
190200
cacheReadsPrice: 0.55,
@@ -194,6 +204,7 @@ export const openAiNativeModels = {
194204
contextWindow: 128_000,
195205
supportsImages: true,
196206
supportsPromptCache: true,
207+
supportsReasoningEffort: true,
197208
inputPrice: 75,
198209
outputPrice: 150,
199210
cacheReadsPrice: 37.5,
@@ -203,6 +214,7 @@ export const openAiNativeModels = {
203214
contextWindow: 128_000,
204215
supportsImages: true,
205216
supportsPromptCache: true,
217+
supportsReasoningEffort: true,
206218
inputPrice: 2.5,
207219
outputPrice: 10,
208220
cacheReadsPrice: 1.25,
@@ -212,6 +224,7 @@ export const openAiNativeModels = {
212224
contextWindow: 128_000,
213225
supportsImages: true,
214226
supportsPromptCache: true,
227+
supportsReasoningEffort: true,
215228
inputPrice: 0.15,
216229
outputPrice: 0.6,
217230
cacheReadsPrice: 0.075,

src/api/providers/openai-native.ts

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -68,6 +68,8 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
6868
// o1 supports developer prompt with formatting
6969
// o1-preview and o1-mini only support user messages
7070
const isOriginalO1 = model.id === "o1"
71+
const { reasoning } = this.getModel()
72+
7173
const response = await this.client.chat.completions.create({
7274
model: model.id,
7375
messages: [
@@ -79,6 +81,7 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
7981
],
8082
stream: true,
8183
stream_options: { include_usage: true },
84+
...(reasoning && reasoning),
8285
})
8386

8487
yield* this.handleStreamResponse(response, model)
@@ -114,12 +117,15 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
114117
systemPrompt: string,
115118
messages: Anthropic.Messages.MessageParam[],
116119
): ApiStream {
120+
const { reasoning } = this.getModel()
121+
117122
const stream = await this.client.chat.completions.create({
118123
model: model.id,
119124
temperature: this.options.modelTemperature ?? OPENAI_NATIVE_DEFAULT_TEMPERATURE,
120125
messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
121126
stream: true,
122127
stream_options: { include_usage: true },
128+
...(reasoning && reasoning),
123129
})
124130

125131
yield* this.handleStreamResponse(stream, model)
@@ -130,12 +136,15 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
130136
systemPrompt: string,
131137
messages: Anthropic.Messages.MessageParam[],
132138
): ApiStream {
139+
const { reasoning } = this.getModel()
140+
133141
const stream = await this.client.chat.completions.create({
134142
model: model.id,
135143
temperature: this.options.modelTemperature ?? OPENAI_NATIVE_DEFAULT_TEMPERATURE,
136144
messages: [{ role: "developer", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
137145
stream: true,
138146
stream_options: { include_usage: true },
147+
...(reasoning && reasoning),
139148
})
140149

141150
yield* this.handleStreamResponse(stream, model)

0 commit comments

Comments (0)