Skip to content

Commit 36572f2

Browse files
committed
feat: update Gemini and Vertex AI models from preview to GA version (#5444)
- Remove outdated preview models (gemini-2.5-pro-preview-*) from type definitions
- Update to GA model (gemini-2.5-pro) across all providers
- Implement backward compatibility for existing users with preview models
- Update OpenRouter integration to remove preview model references
- Fix related tests to reflect model changes

Fixes #5444
1 parent ad201cc commit 36572f2

File tree

8 files changed

+40
-111
lines changed

8 files changed

+40
-111
lines changed

packages/types/src/providers/gemini.ts

Lines changed: 0 additions & 74 deletions
Original file line numberDiff line numberDiff line change
@@ -68,80 +68,6 @@ export const geminiModels = {
6868
inputPrice: 0,
6969
outputPrice: 0,
7070
},
71-
"gemini-2.5-pro-preview-03-25": {
72-
maxTokens: 65_535,
73-
contextWindow: 1_048_576,
74-
supportsImages: true,
75-
supportsPromptCache: true,
76-
inputPrice: 2.5, // This is the pricing for prompts above 200k tokens.
77-
outputPrice: 15,
78-
cacheReadsPrice: 0.625,
79-
cacheWritesPrice: 4.5,
80-
tiers: [
81-
{
82-
contextWindow: 200_000,
83-
inputPrice: 1.25,
84-
outputPrice: 10,
85-
cacheReadsPrice: 0.31,
86-
},
87-
{
88-
contextWindow: Infinity,
89-
inputPrice: 2.5,
90-
outputPrice: 15,
91-
cacheReadsPrice: 0.625,
92-
},
93-
],
94-
},
95-
"gemini-2.5-pro-preview-05-06": {
96-
maxTokens: 65_535,
97-
contextWindow: 1_048_576,
98-
supportsImages: true,
99-
supportsPromptCache: true,
100-
inputPrice: 2.5, // This is the pricing for prompts above 200k tokens.
101-
outputPrice: 15,
102-
cacheReadsPrice: 0.625,
103-
cacheWritesPrice: 4.5,
104-
tiers: [
105-
{
106-
contextWindow: 200_000,
107-
inputPrice: 1.25,
108-
outputPrice: 10,
109-
cacheReadsPrice: 0.31,
110-
},
111-
{
112-
contextWindow: Infinity,
113-
inputPrice: 2.5,
114-
outputPrice: 15,
115-
cacheReadsPrice: 0.625,
116-
},
117-
],
118-
},
119-
"gemini-2.5-pro-preview-06-05": {
120-
maxTokens: 65_535,
121-
contextWindow: 1_048_576,
122-
supportsImages: true,
123-
supportsPromptCache: true,
124-
inputPrice: 2.5, // This is the pricing for prompts above 200k tokens.
125-
outputPrice: 15,
126-
cacheReadsPrice: 0.625,
127-
cacheWritesPrice: 4.5,
128-
maxThinkingTokens: 32_768,
129-
supportsReasoningBudget: true,
130-
tiers: [
131-
{
132-
contextWindow: 200_000,
133-
inputPrice: 1.25,
134-
outputPrice: 10,
135-
cacheReadsPrice: 0.31,
136-
},
137-
{
138-
contextWindow: Infinity,
139-
inputPrice: 2.5,
140-
outputPrice: 15,
141-
cacheReadsPrice: 0.625,
142-
},
143-
],
144-
},
14571
"gemini-2.5-pro": {
14672
maxTokens: 64_000,
14773
contextWindow: 1_048_576,

packages/types/src/providers/openrouter.ts

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -78,7 +78,6 @@ export const OPEN_ROUTER_REASONING_BUDGET_MODELS = new Set([
7878
"anthropic/claude-3.7-sonnet:beta",
7979
"anthropic/claude-opus-4",
8080
"anthropic/claude-sonnet-4",
81-
"google/gemini-2.5-pro-preview",
8281
"google/gemini-2.5-pro",
8382
"google/gemini-2.5-flash-preview-05-20",
8483
"google/gemini-2.5-flash",

packages/types/src/providers/vertex.ts

Lines changed: 0 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -56,32 +56,6 @@ export const vertexModels = {
5656
inputPrice: 0.15,
5757
outputPrice: 0.6,
5858
},
59-
"gemini-2.5-pro-preview-03-25": {
60-
maxTokens: 65_535,
61-
contextWindow: 1_048_576,
62-
supportsImages: true,
63-
supportsPromptCache: true,
64-
inputPrice: 2.5,
65-
outputPrice: 15,
66-
},
67-
"gemini-2.5-pro-preview-05-06": {
68-
maxTokens: 65_535,
69-
contextWindow: 1_048_576,
70-
supportsImages: true,
71-
supportsPromptCache: true,
72-
inputPrice: 2.5,
73-
outputPrice: 15,
74-
},
75-
"gemini-2.5-pro-preview-06-05": {
76-
maxTokens: 65_535,
77-
contextWindow: 1_048_576,
78-
supportsImages: true,
79-
supportsPromptCache: true,
80-
inputPrice: 2.5,
81-
outputPrice: 15,
82-
maxThinkingTokens: 32_768,
83-
supportsReasoningBudget: true,
84-
},
8559
"gemini-2.5-pro": {
8660
maxTokens: 64_000,
8761
contextWindow: 1_048_576,

packages/types/src/providers/vscode-llm.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -120,8 +120,8 @@ export const vscodeLlmModels = {
120120
inputPrice: 0,
121121
outputPrice: 0,
122122
family: "gemini-2.5-pro",
123-
version: "gemini-2.5-pro-preview-03-25",
124-
name: "Gemini 2.5 Pro (Preview)",
123+
version: "gemini-2.5-pro",
124+
name: "Gemini 2.5 Pro",
125125
supportsToolCalling: true,
126126
maxInputTokens: 63830,
127127
},

src/api/providers/fetchers/__tests__/openrouter.spec.ts

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -223,7 +223,6 @@ describe("OpenRouter API", () => {
223223
contextWindow: 1048576,
224224
supportsImages: true,
225225
supportsPromptCache: true,
226-
supportsReasoningBudget: true,
227226
inputPrice: 1.25,
228227
outputPrice: 10,
229228
cacheWritesPrice: 1.625,
@@ -237,7 +236,6 @@ describe("OpenRouter API", () => {
237236
contextWindow: 1048576,
238237
supportsImages: true,
239238
supportsPromptCache: true,
240-
supportsReasoningBudget: true,
241239
inputPrice: 1.25,
242240
outputPrice: 10,
243241
cacheWritesPrice: 1.625,

src/api/providers/gemini.ts

Lines changed: 18 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -131,7 +131,15 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl
131131

132132
override getModel() {
133133
const modelId = this.options.apiModelId
134-
let id = modelId && modelId in geminiModels ? (modelId as GeminiModelId) : geminiDefaultModelId
134+
135+
// Handle backward compatibility for legacy preview model names
136+
let mappedModelId = modelId
137+
if (modelId && this.isLegacyPreviewModel(modelId)) {
138+
mappedModelId = "gemini-2.5-pro"
139+
}
140+
141+
let id =
142+
mappedModelId && mappedModelId in geminiModels ? (mappedModelId as GeminiModelId) : geminiDefaultModelId
135143
const info: ModelInfo = geminiModels[id]
136144
const params = getModelParams({ format: "gemini", modelId: id, model: info, settings: this.options })
137145

@@ -142,6 +150,15 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl
142150
return { id: id.endsWith(":thinking") ? id.replace(":thinking", "") : id, info, ...params }
143151
}
144152

153+
protected isLegacyPreviewModel(modelId: string): boolean {
154+
const legacyPreviewModels = [
155+
"gemini-2.5-pro-preview-03-25",
156+
"gemini-2.5-pro-preview-05-06",
157+
"gemini-2.5-pro-preview-06-05",
158+
]
159+
return legacyPreviewModels.includes(modelId)
160+
}
161+
145162
async completePrompt(prompt: string): Promise<string> {
146163
try {
147164
const { id: model } = this.getModel()

src/api/providers/openrouter.ts

Lines changed: 11 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -82,10 +82,13 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
8282
// other providers (including Gemini), so we need to explicitly disable
8383
// i We should generalize this using the logic in `getModelParams`, but
8484
// this is easier for now.
85-
if (
86-
(modelId === "google/gemini-2.5-pro-preview" || modelId === "google/gemini-2.5-pro") &&
87-
typeof reasoning === "undefined"
88-
) {
85+
// Handle backward compatibility for legacy preview model names
86+
let mappedModelId = modelId
87+
if (this.isLegacyGeminiPreviewModel(modelId)) {
88+
mappedModelId = "google/gemini-2.5-pro"
89+
}
90+
91+
if (mappedModelId === "google/gemini-2.5-pro" && typeof reasoning === "undefined") {
8992
reasoning = { exclude: true }
9093
}
9194

@@ -242,4 +245,8 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
242245
const completion = response as OpenAI.Chat.ChatCompletion
243246
return completion.choices[0]?.message?.content || ""
244247
}
248+
249+
private isLegacyGeminiPreviewModel(modelId: string): boolean {
250+
return modelId === "google/gemini-2.5-pro-preview"
251+
}
245252
}

src/api/providers/vertex.ts

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,15 @@ export class VertexHandler extends GeminiHandler implements SingleCompletionHand
1414

1515
override getModel() {
1616
const modelId = this.options.apiModelId
17-
let id = modelId && modelId in vertexModels ? (modelId as VertexModelId) : vertexDefaultModelId
17+
18+
// Handle backward compatibility for legacy preview model names
19+
let mappedModelId = modelId
20+
if (modelId && this.isLegacyPreviewModel(modelId)) {
21+
mappedModelId = "gemini-2.5-pro"
22+
}
23+
24+
let id =
25+
mappedModelId && mappedModelId in vertexModels ? (mappedModelId as VertexModelId) : vertexDefaultModelId
1826
const info: ModelInfo = vertexModels[id]
1927
const params = getModelParams({ format: "gemini", modelId: id, model: info, settings: this.options })
2028

0 commit comments

Comments (0)