Skip to content

Commit 2975c15

Browse files
authored
Merge pull request #157 from tobitege/feat_openai_gpt54_codex53
(feat)Add GPT-5.4 and GPT-5.3-Codex model support
2 parents d6da170 + d03fdca commit 2975c15

File tree

12 files changed

+167
-65
lines changed

12 files changed

+167
-65
lines changed

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -127,7 +127,7 @@ foreach (ChatModel model in models)
127127
}
128128
```
129129

130-
💡 Instead of passing in a strongly typed model, you can pass a string instead: `await api.Chat.CreateConversation("gpt-4o")`, Tornado will automatically resolve the provider.
130+
💡 Instead of passing in a strongly typed model, you can pass a string instead: `await api.Chat.CreateConversation("gpt-5-mini")`, Tornado will automatically resolve the provider.
131131

132132
## ❄️ Vendor Extensions
133133

src/LlmTornado.Tests/ContextController/ContextWindowSummarizerOrderTests.cs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -104,10 +104,10 @@ public async Task PreservesOrder_WithChunkSplitting_MultipleSummaries()
104104

105105
var summarizer = CreateSummarizer(store);
106106

107-
// Small chunk size to force splits between U1/U2/U3
107+
// Small token-based chunk size to force splits between U1/U2/U3
108108
var options = new MessageCompressionOptions
109109
{
110-
ChunkSize = 250, // each ~200, so each becomes its own summary
110+
ChunkSize = 75, // each 200-char message is ~50 tokens, so contiguous messages split into separate summaries
111111
PreserveSystemmessages = true,
112112
CompressToolCallmessages = true,
113113
SummaryModel = TestModel,

src/LlmTornado/Chat/ChatRequest.cs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -568,7 +568,7 @@ private static string PreparePayload(JObject sourceObject, ChatRequest context,
568568
x.Temperature = null;
569569
}
570570

571-
// GPT-5.2 parameter compatibility
571+
// GPT-5.2 and GPT-5.4 parameter compatibility
572572
bool hasNonNoneReasoning = x.ReasoningEffort is not null && x.ReasoningEffort != ChatReasoningEfforts.None;
573573
if (ChatModelOpenAi.ShouldClearSamplingParams(x.Model, hasNonNoneReasoning))
574574
{

src/LlmTornado/Chat/Models/OpenAi/ChatModelOpenAi.cs

Lines changed: 11 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -45,9 +45,9 @@ public class ChatModelOpenAi : BaseVendorModelProvider
4545
public readonly ChatModelOpenAiGpt52 Gpt52 = new ChatModelOpenAiGpt52();
4646

4747
/// <summary>
48-
/// GPT-5.3 models.
48+
/// GPT-5.4 models.
4949
/// </summary>
50-
public readonly ChatModelOpenAiGpt53 Gpt53 = new ChatModelOpenAiGpt53();
50+
public readonly ChatModelOpenAiGpt54 Gpt54 = new ChatModelOpenAiGpt54();
5151

5252
/// <summary>
5353
/// O3 models.
@@ -98,14 +98,14 @@ public override bool OwnsModel(string model)
9898
/// </summary>
9999
public static List<IModel> ModelsAll => LazyModelsAll.Value;
100100

101-
private static readonly Lazy<List<IModel>> LazyModelsAll = new Lazy<List<IModel>>(() => [..ChatModelOpenAiGpt35.ModelsAll, ..ChatModelOpenAiGpt4.ModelsAll, ..ChatModelOpenAiO3.ModelsAll, ..ChatModelOpenAiO4.ModelsAll, ..ChatModelOpenAiGpt41.ModelsAll, ..ChatModelOpenAiGpt5.ModelsAll, ..ChatModelOpenAiGpt51.ModelsAll, ..ChatModelOpenAiGpt52.ModelsAll, ..ChatModelOpenAiGpt53.ModelsAll, ..ChatModelOpenAiCodex.ModelsAll]);
101+
private static readonly Lazy<List<IModel>> LazyModelsAll = new Lazy<List<IModel>>(() => [..ChatModelOpenAiGpt35.ModelsAll, ..ChatModelOpenAiGpt4.ModelsAll, ..ChatModelOpenAiO3.ModelsAll, ..ChatModelOpenAiO4.ModelsAll, ..ChatModelOpenAiGpt41.ModelsAll, ..ChatModelOpenAiGpt5.ModelsAll, ..ChatModelOpenAiGpt51.ModelsAll, ..ChatModelOpenAiGpt52.ModelsAll, ..ChatModelOpenAiGpt54.ModelsAll, ..ChatModelOpenAiCodex.ModelsAll]);
102102

103103
/// <summary>
104104
/// All reasoning models. Requests for these models are serialized differently.
105105
/// </summary>
106106
public static List<IModel> ReasoningModelsAll => LazyReasoningModelsAll.Value;
107107

108-
private static readonly Lazy<List<IModel>> LazyReasoningModelsAll = new Lazy<List<IModel>>(() => [..ChatModelOpenAiGpt4.ReasoningModels, ..ChatModelOpenAiO3.ModelsAll, ..ChatModelOpenAiO4.ModelsAll, ..ChatModelOpenAiGpt5.ModelsAll, ..ChatModelOpenAiGpt51.ModelsAll, ..ChatModelOpenAiGpt52.ModelsAll, ..ChatModelOpenAiGpt53.ModelsAll]);
108+
private static readonly Lazy<List<IModel>> LazyReasoningModelsAll = new Lazy<List<IModel>>(() => [..ChatModelOpenAiGpt4.ReasoningModels, ..ChatModelOpenAiO3.ModelsAll, ..ChatModelOpenAiO4.ModelsAll, ..ChatModelOpenAiGpt5.ModelsAll, ..ChatModelOpenAiGpt51.ModelsAll, ..ChatModelOpenAiGpt52.ModelsAll, ..ChatModelOpenAiGpt54.ModelsAll, ChatModelOpenAiCodex.ModelGpt53Codex]);
109109

110110
/// <summary>
111111
/// HashSet version of ReasoningModelsAll.
@@ -119,7 +119,7 @@ public override bool OwnsModel(string model)
119119
/// </summary>
120120
public static List<IModel> WebSearchCompatibleModelsAll => LazyWebSearchCompatibleModelsAll.Value;
121121

122-
private static readonly Lazy<List<IModel>> LazyWebSearchCompatibleModelsAll = new Lazy<List<IModel>>(() => [ChatModelOpenAiGpt4.ModelOSearchPreview, ChatModelOpenAiGpt4.ModelOMiniSearchPreview, ..ChatModelOpenAiGpt5.ModelsAll, ..ChatModelOpenAiGpt51.ModelsAll, ..ChatModelOpenAiGpt52.ModelsAll, ..ChatModelOpenAiGpt53.ModelsAll]);
122+
private static readonly Lazy<List<IModel>> LazyWebSearchCompatibleModelsAll = new Lazy<List<IModel>>(() => [ChatModelOpenAiGpt4.ModelOSearchPreview, ChatModelOpenAiGpt4.ModelOMiniSearchPreview, ..ChatModelOpenAiGpt5.ModelsAll, ..ChatModelOpenAiGpt51.ModelsAll, ..ChatModelOpenAiGpt52.ModelsAll, ..ChatModelOpenAiGpt54.ModelsAll]);
123123

124124
internal static HashSet<IModel> TempIncompatibleModels => LazyTempIncompatibleModels.Value;
125125

@@ -133,22 +133,24 @@ public override bool OwnsModel(string model)
133133
internal static HashSet<IModel> SamplingParamsNeverSupported => LazySamplingParamsNeverSupported.Value;
134134

135135
private static readonly Lazy<HashSet<IModel>> LazySamplingParamsNeverSupported = new Lazy<HashSet<IModel>>(() => [
136-
ChatModelOpenAiGpt5.ModelV5, ChatModelOpenAiGpt5.ModelV5Mini, ChatModelOpenAiGpt5.ModelV5Nano, ChatModelOpenAiGpt5.ModelV5Pro, ChatModelOpenAiGpt5.ModelV5Codex
136+
ChatModelOpenAiGpt5.ModelV5, ChatModelOpenAiGpt5.ModelV5Mini, ChatModelOpenAiGpt5.ModelV5Nano, ChatModelOpenAiGpt5.ModelV5Pro, ChatModelOpenAiGpt5.ModelV5Codex, ChatModelOpenAiGpt54.ModelV54Pro, ChatModelOpenAiCodex.ModelGpt53Codex
137137
]);
138138

139139
/// <summary>
140-
/// Models that conditionally support temperature/top_p/logprobs only when reasoning effort is none (GPT-5.2, GPT-5.1).
140+
/// Models that conditionally support temperature/top_p/logprobs only when reasoning effort is none (GPT-5.4, GPT-5.2, GPT-5.1).
141141
/// </summary>
142142
internal static HashSet<IModel> SamplingParamsConditionallySupported => LazySamplingParamsConditionallySupported.Value;
143143

144144
private static readonly Lazy<HashSet<IModel>> LazySamplingParamsConditionallySupported = new Lazy<HashSet<IModel>>(() => [
145-
..ChatModelOpenAiGpt51.ModelsAll, ..ChatModelOpenAiGpt52.ModelsAll, ..ChatModelOpenAiGpt53.ModelsAll
145+
..ChatModelOpenAiGpt51.ModelsAll, ..ChatModelOpenAiGpt52.ModelsAll, ChatModelOpenAiGpt54.ModelV54
146146
]);
147147

148148
/// <summary>
149149
/// Determines whether sampling parameters (temperature, top_p, logprobs) should be cleared for GPT-5.x models.
150-
/// GPT-5.2 parameter compatibility:
150+
/// GPT-5.4 parameter compatibility:
151151
/// - Older GPT-5 models (gpt-5, gpt-5-mini, gpt-5-nano) never support these parameters
152+
/// - GPT-5.4 only supports these on gpt-5.4 when reasoning effort is none, while gpt-5.4-pro never supports them
153+
/// - GPT-5.3-Codex never supports these because it only exposes reasoning modes low, medium, high, and xhigh
152154
/// - GPT-5.2 and GPT-5.1 only support these when reasoning effort is none
153155
/// </summary>
154156
/// <param name="model">The model being used.</param>

src/LlmTornado/Chat/Models/OpenAi/ChatModelOpenAiCodex.cs

Lines changed: 16 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -37,12 +37,27 @@ public class ChatModelOpenAiCodex : IVendorModelClassProvider
3737
/// </summary>
3838
public readonly ChatModel MiniLatest = ModelMiniLatest;
3939

40+
/// <summary>
41+
/// GPT-5.3-Codex is OpenAI's most capable agentic coding model to date.
42+
/// Supports low, medium, high, and xhigh reasoning effort settings.
43+
/// 400,000 context window, 128,000 max output tokens, Aug 31, 2025 knowledge cutoff.
44+
/// </summary>
45+
public static readonly ChatModel ModelGpt53Codex = new ChatModel("gpt-5.3-codex", LLmProviders.OpenAi, 400_000)
46+
{
47+
EndpointCapabilities = [ ChatModelEndpointCapabilities.Responses, ChatModelEndpointCapabilities.Chat, ChatModelEndpointCapabilities.Batch ]
48+
};
49+
50+
/// <summary>
51+
/// <inheritdoc cref="ModelGpt53Codex"/>
52+
/// </summary>
53+
public readonly ChatModel Gpt53Codex = ModelGpt53Codex;
54+
4055
/// <summary>
4156
/// All known Codex models from OpenAI.
4257
/// </summary>
4358
public static List<IModel> ModelsAll => LazyModelsAll.Value;
4459

45-
private static readonly Lazy<List<IModel>> LazyModelsAll = new Lazy<List<IModel>>(() => [ModelMiniLatest, ModelComputerUsePreview]);
60+
private static readonly Lazy<List<IModel>> LazyModelsAll = new Lazy<List<IModel>>(() => [ModelMiniLatest, ModelComputerUsePreview, ModelGpt53Codex]);
4661

4762
/// <summary>
4863
/// <inheritdoc cref="ModelsAll"/>

src/LlmTornado/Chat/Models/OpenAi/ChatModelOpenAiGpt53.cs

Lines changed: 0 additions & 46 deletions
This file was deleted.

src/LlmTornado/Chat/Models/OpenAi/ChatModelOpenAiGpt54.cs

Lines changed: 61 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,61 @@
1+
using System;
2+
using System.Collections.Generic;
3+
using LlmTornado.Code;
4+
using LlmTornado.Code.Models;
5+
6+
namespace LlmTornado.Chat.Models;
7+
8+
/// <summary>
9+
/// GPT-5.4 class models from OpenAI.
10+
/// </summary>
11+
public class ChatModelOpenAiGpt54 : IVendorModelClassProvider
12+
{
13+
/// <summary>
14+
/// GPT-5.4 is OpenAI's frontier model for complex professional work.
15+
/// Supports reasoning.effort: none (default), low, medium, high, and xhigh.
16+
/// 1.05M context window.
17+
/// </summary>
18+
public static readonly ChatModel ModelV54 = new ChatModel("gpt-5.4", LLmProviders.OpenAi, 1_050_000, [])
19+
{
20+
EndpointCapabilities = [ ChatModelEndpointCapabilities.Responses, ChatModelEndpointCapabilities.Chat, ChatModelEndpointCapabilities.Batch ]
21+
};
22+
23+
/// <summary>
24+
/// <inheritdoc cref="ModelV54"/>
25+
/// </summary>
26+
public readonly ChatModel V54 = ModelV54;
27+
28+
/// <summary>
29+
/// GPT-5.4 Pro uses more compute to think harder and provide consistently better answers.
30+
/// Available in the Responses API only and supports reasoning.effort: medium, high, and xhigh.
31+
/// 1.05M context window.
32+
/// </summary>
33+
public static readonly ChatModel ModelV54Pro = new ChatModel("gpt-5.4-pro", LLmProviders.OpenAi, 1_050_000, [])
34+
{
35+
EndpointCapabilities = [ ChatModelEndpointCapabilities.Responses, ChatModelEndpointCapabilities.Batch ]
36+
};
37+
38+
/// <summary>
39+
/// <inheritdoc cref="ModelV54Pro"/>
40+
/// </summary>
41+
public readonly ChatModel V54Pro = ModelV54Pro;
42+
43+
/// <summary>
44+
/// All known GPT-5.4 models from OpenAI.
45+
/// </summary>
46+
public static List<IModel> ModelsAll => LazyModelsAll.Value;
47+
48+
private static readonly Lazy<List<IModel>> LazyModelsAll = new Lazy<List<IModel>>(() => [
49+
ModelV54, ModelV54Pro
50+
]);
51+
52+
/// <summary>
53+
/// <inheritdoc cref="ModelsAll"/>
54+
/// </summary>
55+
public List<IModel> AllModels => ModelsAll;
56+
57+
internal ChatModelOpenAiGpt54()
58+
{
59+
60+
}
61+
}

0 commit comments

Comments (0)