Commit 6f86819

Revert "GPT5 OpenAI Fix (#6864)"
This reverts commit cda67a8.
1 parent 76e5a72 commit 6f86819

File tree: 33 files changed (+248, -2417 lines)

packages/types/src/message.ts

Lines changed: 0 additions & 11 deletions
@@ -176,17 +176,6 @@ export const clineMessageSchema = z.object({
     contextCondense: contextCondenseSchema.optional(),
     isProtected: z.boolean().optional(),
     apiProtocol: z.union([z.literal("openai"), z.literal("anthropic")]).optional(),
-    metadata: z
-        .object({
-            gpt5: z
-                .object({
-                    previous_response_id: z.string().optional(),
-                    instructions: z.string().optional(),
-                    reasoning_summary: z.string().optional(),
-                })
-                .optional(),
-        })
-        .optional(),
 })

 export type ClineMessage = z.infer<typeof clineMessageSchema>
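
For context, the reverted change had added an optional metadata.gpt5 block to each message for carrying OpenAI Responses API continuity state. A minimal sketch of what that removed schema accepted, using only the field names visible in the diff above (the sample value is made up, and the rest of clineMessageSchema is elided):

import { z } from "zod"

// Reduced sketch of the reverted metadata shape; field names come from the diff above.
const gpt5MetadataSchema = z.object({
    gpt5: z
        .object({
            previous_response_id: z.string().optional(), // Responses API continuity id
            instructions: z.string().optional(),
            reasoning_summary: z.string().optional(),
        })
        .optional(),
})

// "resp_123" is an illustrative value, not a real response id.
const result = gpt5MetadataSchema.safeParse({ gpt5: { previous_response_id: "resp_123" } })
console.log(result.success) // true under the old schema; after the revert there is no schema slot for this data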

packages/types/src/model.ts

Lines changed: 0 additions & 2 deletions
@@ -44,8 +44,6 @@ export const modelInfoSchema = z.object({
     supportsImages: z.boolean().optional(),
     supportsComputerUse: z.boolean().optional(),
     supportsPromptCache: z.boolean(),
-    // Capability flag to indicate whether the model supports an output verbosity parameter
-    supportsVerbosity: z.boolean().optional(),
     supportsReasoningBudget: z.boolean().optional(),
     requiredReasoningBudget: z.boolean().optional(),
     supportsReasoningEffort: z.boolean().optional(),

packages/types/src/provider-settings.ts

Lines changed: 1 addition & 6 deletions
@@ -3,11 +3,6 @@ import { z } from "zod"
 import { reasoningEffortsSchema, verbosityLevelsSchema, modelInfoSchema } from "./model.js"
 import { codebaseIndexProviderSchema } from "./codebase-index.js"

-// Extended schema that includes "minimal" for GPT-5 models
-export const extendedReasoningEffortsSchema = z.union([reasoningEffortsSchema, z.literal("minimal")])
-
-export type ReasoningEffortWithMinimal = z.infer<typeof extendedReasoningEffortsSchema>
-
 /**
  * ProviderName
  */
@@ -81,7 +76,7 @@ const baseProviderSettingsSchema = z.object({

     // Model reasoning.
     enableReasoningEffort: z.boolean().optional(),
-    reasoningEffort: extendedReasoningEffortsSchema.optional(),
+    reasoningEffort: reasoningEffortsSchema.optional(),
     modelMaxTokens: z.number().optional(),
     modelMaxThinkingTokens: z.number().optional(),
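
The practical effect of this hunk is that "minimal" is no longer an accepted reasoningEffort value in provider settings. A rough before/after sketch of the validation behavior, assuming reasoningEffortsSchema is an enum of "low" | "medium" | "high" (its definition is not part of this diff):

import { z } from "zod"

// Assumption: the base schema accepts the three standard effort levels.
const reasoningEffortsSchema = z.enum(["low", "medium", "high"])

// Reverted extension: GPT-5 settings additionally allowed "minimal".
const extendedReasoningEffortsSchema = z.union([reasoningEffortsSchema, z.literal("minimal")])

console.log(reasoningEffortsSchema.safeParse("minimal").success) // false: rejected after the revert
console.log(extendedReasoningEffortsSchema.safeParse("minimal").success) // true: accepted before the revert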

packages/types/src/providers/openai.ts

Lines changed: 0 additions & 8 deletions
@@ -12,39 +12,32 @@ export const openAiNativeModels = {
         supportsImages: true,
         supportsPromptCache: true,
         supportsReasoningEffort: true,
-        reasoningEffort: "medium",
         inputPrice: 1.25,
         outputPrice: 10.0,
         cacheReadsPrice: 0.13,
         description: "GPT-5: The best model for coding and agentic tasks across domains",
-        // supportsVerbosity is a new capability; ensure ModelInfo includes it
-        supportsVerbosity: true,
     },
     "gpt-5-mini-2025-08-07": {
         maxTokens: 128000,
         contextWindow: 400000,
         supportsImages: true,
         supportsPromptCache: true,
         supportsReasoningEffort: true,
-        reasoningEffort: "medium",
         inputPrice: 0.25,
         outputPrice: 2.0,
         cacheReadsPrice: 0.03,
         description: "GPT-5 Mini: A faster, more cost-efficient version of GPT-5 for well-defined tasks",
-        supportsVerbosity: true,
     },
     "gpt-5-nano-2025-08-07": {
         maxTokens: 128000,
         contextWindow: 400000,
         supportsImages: true,
         supportsPromptCache: true,
         supportsReasoningEffort: true,
-        reasoningEffort: "medium",
         inputPrice: 0.05,
         outputPrice: 0.4,
         cacheReadsPrice: 0.01,
         description: "GPT-5 Nano: Fastest, most cost-efficient version of GPT-5",
-        supportsVerbosity: true,
     },
     "gpt-4.1": {
         maxTokens: 32_768,
@@ -247,6 +240,5 @@ export const openAiModelInfoSaneDefaults: ModelInfo = {
 export const azureOpenAiDefaultApiVersion = "2024-08-01-preview"

 export const OPENAI_NATIVE_DEFAULT_TEMPERATURE = 0
-export const GPT5_DEFAULT_TEMPERATURE = 1.0

 export const OPENAI_AZURE_AI_INFERENCE_PATH = "/models/chat/completions"
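
A side effect of dropping the per-model reasoningEffort: "medium" entries is that code reading an effort level off these model definitions now gets undefined for the GPT-5 models and must supply its own default. A small illustrative sketch; resolveReasoningEffort and its fallback are hypothetical, not from the repository:

type ReasoningEffort = "low" | "medium" | "high"

interface ModelInfoLike {
    supportsReasoningEffort?: boolean
    reasoningEffort?: ReasoningEffort
}

// Hypothetical helper: with this revert applied, the GPT-5 entries no longer carry a
// reasoningEffort value, so the caller-supplied fallback is what actually takes effect.
function resolveReasoningEffort(info: ModelInfoLike, fallback: ReasoningEffort = "medium"): ReasoningEffort | undefined {
    if (!info.supportsReasoningEffort) return undefined
    return info.reasoningEffort ?? fallback
}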

src/api/index.ts

Lines changed: 0 additions & 7 deletions
@@ -44,13 +44,6 @@ export interface SingleCompletionHandler {
 export interface ApiHandlerCreateMessageMetadata {
     mode?: string
     taskId: string
-    previousResponseId?: string
-    /**
-     * When true, the provider must NOT fall back to internal continuity state
-     * (e.g., lastResponseId) if previousResponseId is absent.
-     * Used to enforce "skip once" after a condense operation.
-     */
-    suppressPreviousResponseId?: boolean
 }

 export interface ApiHandler {
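
For reference, the two removed fields formed a small continuity contract between the task layer and the OpenAI provider: either pass the previous response id explicitly, or signal that the provider should skip its own fallback once (e.g., right after a condense operation). A hedged sketch of how a provider might honor that contract, using the pre-revert field names above; the surrounding class and its lastResponseId field are illustrative, not code from the repository:

// Pre-revert shape of the metadata, reduced to the fields relevant here.
interface CreateMessageMetadata {
    taskId: string
    previousResponseId?: string
    suppressPreviousResponseId?: boolean
}

// Illustrative provider-side logic, not taken from the repository.
class ExampleOpenAiHandler {
    private lastResponseId?: string

    resolvePreviousResponseId(meta: CreateMessageMetadata): string | undefined {
        // An explicit id from the caller always wins.
        if (meta.previousResponseId) return meta.previousResponseId
        // "Skip once" after a condense: do not fall back to internal continuity state.
        if (meta.suppressPreviousResponseId) return undefined
        return this.lastResponseId
    }
}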
