Commit a96e1e4

Adds GPT-5 models and enforces temperature reqs

- Ensures temperature is left unset for GPT-5 models, as custom values are unsupported

1 parent 90e460f

File tree

5 files changed: +49 -5 lines changed

- CHANGELOG.md
- src/plus/ai/models/model.ts
- src/plus/ai/openAICompatibleProviderBase.ts
- src/plus/ai/utils/-webview/ai.utils.ts
- src/plus/ai/vscodeProvider.ts

CHANGELOG.md

Lines changed: 2 additions & 0 deletions

@@ -16,6 +16,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/) and this p
 - Adds ability to set or change the upstream branch for branches in the _Commit Graph_ and other GitLens views ([#4498](https://github.com/gitkraken/vscode-gitlens/issues/4498))
   - Adds new _Set Upstream..._ and _Change Upstream..._ context menu items to branches in the _Commit Graph_ and other GitLens views
   - Adds a new _upstream_ sub-command to the _branch_ Git Command Palette
+- Adds updated AI model support for GitLens' AI features
+  - Adds GPT-5 family (GPT-5, GPT-5 Mini, GPT-5 Nano), and Claude 4.1 Opus models
 - Add Azure DevOps Server integration support ([#4478](https://github.com/gitkraken/vscode-gitlens/issues/4478))

 ### Changed

src/plus/ai/models/model.ts

Lines changed: 39 additions & 0 deletions

@@ -97,6 +97,45 @@ export const openAIModels = <T extends OpenAIProviders>(provider: AIProviderDesc
 		provider: provider,
 		hidden: true,
 	},
+	{
+		id: 'gpt-5',
+		name: 'GPT-5',
+		maxTokens: { input: 400000, output: 128000 },
+		provider: provider,
+	},
+	{
+		id: 'gpt-5-2025-08-07',
+		name: 'GPT-5',
+		maxTokens: { input: 400000, output: 128000 },
+		provider: provider,
+		hidden: true,
+	},
+	{
+		id: 'gpt-5-mini',
+		name: 'GPT-5 mini',
+		maxTokens: { input: 400000, output: 128000 },
+		provider: provider,
+	},
+	{
+		id: 'gpt-5-mini-2025-08-07',
+		name: 'GPT-5 mini',
+		maxTokens: { input: 400000, output: 128000 },
+		provider: provider,
+		hidden: true,
+	},
+	{
+		id: 'gpt-5-nano',
+		name: 'GPT-5 nano',
+		maxTokens: { input: 400000, output: 128000 },
+		provider: provider,
+	},
+	{
+		id: 'gpt-5-nano-2025-08-07',
+		name: 'GPT-5 nano',
+		maxTokens: { input: 400000, output: 128000 },
+		provider: provider,
+		hidden: true,
+	},
 	{
 		id: 'o4-mini',
 		name: 'o4 mini',
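Note that each GPT-5 tier is registered twice: a stable alias id ('gpt-5', 'gpt-5-mini', 'gpt-5-nano') and a hidden, date-pinned snapshot id ('...-2025-08-07'). A minimal sketch of how the hidden flag might be consumed, assuming the descriptor shape implied by the diff and that hidden entries are excluded from model pickers (the real AIModel interface in GitLens may carry more fields):

// Sketch only — field names inferred from the entries above; the actual
// AIModel interface in GitLens may differ.
interface ModelEntry {
	id: string;
	name: string;
	maxTokens: { input: number; output: number };
	provider: unknown;
	hidden?: boolean;
}

// A picker would presumably list only the alias entries, while hidden
// date-pinned ids (e.g. 'gpt-5-2025-08-07') still resolve when a user
// has pinned one explicitly in settings.
function visibleModels(models: ModelEntry[]): ModelEntry[] {
	return models.filter(m => !m.hidden);
}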

src/plus/ai/openAICompatibleProviderBase.ts

Lines changed: 1 addition & 1 deletion

@@ -130,7 +130,7 @@ export abstract class OpenAICompatibleProviderBase<T extends AIProviders> implem
 			max_completion_tokens: model.maxTokens.output
 				? Math.min(modelOptions?.outputTokens ?? Infinity, model.maxTokens.output)
 				: modelOptions?.outputTokens,
-			temperature: getValidatedTemperature(modelOptions?.temperature ?? model.temperature),
+			temperature: getValidatedTemperature(model, modelOptions?.temperature ?? model.temperature),
 		};

 		const rsp = await this.fetchCore(action, model, apiKey, request, cancellation);
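The call site now passes the model itself so the validator can recognize GPT-5. Returning undefined (rather than substituting a default number) matters here: assuming fetchCore serializes the request with JSON.stringify, as is typical for OpenAI-compatible providers, an undefined-valued property is dropped from the payload entirely, letting the API apply its own default temperature. A quick standalone illustration, not GitLens code:

// Standalone illustration of the undefined-dropping behavior.
const request = {
	model: 'gpt-5',
	max_completion_tokens: 128000,
	temperature: undefined, // what getValidatedTemperature returns for GPT-5
};

// JSON.stringify omits properties whose value is undefined, so no
// "temperature" field reaches the API at all:
console.log(JSON.stringify(request));
// => {"model":"gpt-5","max_completion_tokens":128000}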

src/plus/ai/utils/-webview/ai.utils.ts

Lines changed: 6 additions & 3 deletions

@@ -137,10 +137,13 @@ export async function getOrPromptApiKey(
 	return apiKey;
 }

-export function getValidatedTemperature(modelTemperature?: number | null): number | undefined {
+export function getValidatedTemperature(model: AIModel, modelTemperature?: number | null): number | undefined {
 	if (modelTemperature === null) return undefined;
-	if (modelTemperature != null) return modelTemperature;
-	return Math.max(0, Math.min(configuration.get('ai.modelOptions.temperature'), 2));
+	// GPT5 doesn't support anything but the default temperature
+	if (model.id.startsWith('gpt-5')) return undefined;
+
+	modelTemperature ??= Math.max(0, Math.min(configuration.get('ai.modelOptions.temperature'), 2));
+	return modelTemperature;
 }

 export async function showLargePromptWarning(estimatedTokens: number, threshold: number): Promise<boolean> {
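The resulting behavior of the new signature, sketched with hypothetical model descriptors (illustrative only, not real GitLens fixtures):

// Hypothetical descriptors for illustration only.
declare const gpt5: AIModel;  // a model whose id is 'gpt-5'
declare const other: AIModel; // any model whose id doesn't start with 'gpt-5'

getValidatedTemperature(gpt5);        // undefined — GPT-5 only accepts the API default
getValidatedTemperature(gpt5, 0.7);   // still undefined — custom values are unsupported
getValidatedTemperature(other, null); // undefined — explicit opt-out, checked first
getValidatedTemperature(other, 0.7);  // 0.7 — a provided temperature passes through
getValidatedTemperature(other);       // the 'ai.modelOptions.temperature' setting, clamped to [0, 2]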

src/plus/ai/vscodeProvider.ts

Lines changed: 1 addition & 1 deletion

@@ -100,7 +100,7 @@ export class VSCodeAIProvider implements AIProvider<typeof provider.id> {
 					outputTokens: model.maxTokens.output
 						? Math.min(options.modelOptions?.outputTokens ?? Infinity, model.maxTokens.output)
 						: options.modelOptions?.outputTokens,
-					temperature: getValidatedTemperature(model.temperature),
+					temperature: getValidatedTemperature(model, model.temperature),
 				},
 			},
 			options.cancellation,
