Skip to content

Commit 5031ff8

Browse files
committed
tokenize: accept ChatRequest, ResponseRequest
1 parent 4820258 commit 5031ff8

File tree

10 files changed

+387
-8
lines changed

10 files changed

+387
-8
lines changed

src/LlmTornado.Demo/ChatDemo.cs

Lines changed: 65 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -959,6 +959,71 @@ await chat.StreamResponseRich(new ChatStreamEventHandler
959959
});
960960
}
961961

962+
/// <summary>
/// Demo: sends an MP3 clip together with a text question to the GPT-5 audio model
/// (text-only output modality) and prints the model's answer.
/// </summary>
[TornadoTest]
public static async Task AudioInGptAudioMp3()
{
    ChatRequest request = new ChatRequest
    {
        Model = ChatModel.OpenAi.Gpt5.Audio,
        Modalities = [ ChatModelModalities.Text ],
        MaxTokens = 2000
    };

    Conversation conversation = Program.Connect().Chat.CreateConversation(request);

    // NOTE(review): path is relative to the working directory — assumes the demo runs from the project root.
    byte[] mp3Bytes = await File.ReadAllBytesAsync("Static/Audio/sample.mp3");

    ChatMessagePart question = new ChatMessagePart("What is being said in this audio?");
    ChatMessagePart audioClip = new ChatMessagePart(mp3Bytes, ChatAudioFormats.Mp3);
    conversation.AppendUserInput([ question, audioClip ]);

    string? answer = await conversation.GetResponse();
    Console.WriteLine(answer);
}
982+
983+
/// <summary>
/// Demo: sends a WAV clip as the only user content (no accompanying text prompt)
/// to the GPT-5 audio model and prints the text reply.
/// </summary>
[TornadoTest]
public static async Task AudioInGptAudioWav()
{
    Conversation conversation = Program.Connect().Chat.CreateConversation(new ChatRequest
    {
        Model = ChatModel.OpenAi.Gpt5.Audio,
        Modalities = [ ChatModelModalities.Text ],
        MaxTokens = 2000
    });

    // NOTE(review): path is relative to the working directory — assumes the demo runs from the project root.
    byte[] wavBytes = await File.ReadAllBytesAsync("Static/Audio/sample.wav");
    conversation.AppendUserInput([ new ChatMessagePart(wavBytes, ChatAudioFormats.Wav) ]);

    Console.WriteLine(await conversation.GetResponse());
}
1002+
1003+
/// <summary>
/// Demo: audio input via the low-level Chat.CreateChatCompletion call (no Conversation wrapper),
/// printing the first choice's message content.
/// </summary>
[TornadoTest]
public static async Task AudioInGptAudioDirect()
{
    TornadoApi api = Program.Connect();

    // NOTE(review): path is relative to the working directory — assumes the demo runs from the project root.
    byte[] mp3Bytes = await File.ReadAllBytesAsync("Static/Audio/sample.mp3");

    List<ChatMessagePart> userContent =
    [
        new ChatMessagePart("What is being said in this audio?"),
        new ChatMessagePart(mp3Bytes, ChatAudioFormats.Mp3)
    ];

    ChatRequest request = new ChatRequest
    {
        Model = ChatModel.OpenAi.Gpt5.Audio,
        Modalities = [ ChatModelModalities.Text ],
        MaxTokens = 2000,
        Messages = [new ChatMessage(ChatMessageRoles.User, userContent)]
    };

    ChatResult? result = await api.Chat.CreateChatCompletion(request);
    Console.WriteLine(result?.Choices?.FirstOrDefault()?.Message?.Content);
}
1026+
9621027
[TornadoTest]
9631028
public static async Task AudioInAudioOutMultiturn()
9641029
{

src/LlmTornado.Demo/TokenizeDemo.cs

Lines changed: 125 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@
77
using LlmTornado.ChatFunctions;
88
using LlmTornado.Code;
99
using LlmTornado.Common;
10+
using LlmTornado.Responses;
1011
using LlmTornado.Tokenize;
1112
using LlmTornado.Tokenize.Vendors;
1213

@@ -165,5 +166,129 @@ public static async Task TokenizeCohereLongText()
165166

166167
Console.WriteLine($"Cohere long text tokenization: {result.TotalTokens} tokens");
167168
}
169+
170+
/// <summary>
/// Counts tokens for a plain string via the OpenAI tokenizer and asserts a positive total.
/// </summary>
[TornadoTest]
public static async Task TokenizeOpenAiText()
{
    const string text = "Hello, world! This is a test message.";

    TokenizeRequest request = new TokenizeRequest(ChatModel.OpenAi.Gpt5.V5Mini, text);
    TokenizeResult? result = await Program.ConnectMulti().Tokenize.CountTokens(request);

    Assert.That(result, Is.NotNull);
    Assert.That(result!.TotalTokens, Is.GreaterThan(0));

    Console.WriteLine($"OpenAI text tokenization: {result.TotalTokens} tokens");
}
181+
182+
/// <summary>
/// Counts tokens for a short system/user/assistant message exchange and asserts a positive total.
/// </summary>
[TornadoTest]
public static async Task TokenizeOpenAiMessages()
{
    List<ChatMessage> conversation =
    [
        new ChatMessage(ChatMessageRoles.System, "You are a helpful assistant."),
        new ChatMessage(ChatMessageRoles.User, "Hello, how are you?"),
        new ChatMessage(ChatMessageRoles.Assistant, "I'm doing well, thank you!")
    ];

    TokenizeResult? result = await Program.ConnectMulti().Tokenize.CountTokens(new TokenizeRequest(ChatModel.OpenAi.Gpt5.V5Mini, conversation));

    Assert.That(result, Is.NotNull);
    Assert.That(result!.TotalTokens, Is.GreaterThan(0));

    Console.WriteLine($"OpenAI messages tokenization: {result.TotalTokens} tokens");
}
198+
199+
/// <summary>
/// Counts tokens for messages plus a tool definition, asserting a positive total.
/// </summary>
[TornadoTest]
public static async Task TokenizeOpenAiWithTools()
{
    // JSON-schema-shaped anonymous object describing the tool's single parameter.
    object weatherParameters = new
    {
        type = "object",
        properties = new
        {
            location = new
            {
                type = "string",
                description = "The city and state, e.g. San Francisco, CA"
            }
        },
        required = new[] { "location" }
    };

    Tool weatherTool = new Tool(new ToolFunction("get_weather", "Get the weather for a location", weatherParameters));

    List<ChatMessage> messages =
    [
        new ChatMessage(ChatMessageRoles.System, "You are a helpful assistant."),
        new ChatMessage(ChatMessageRoles.User, "What's the weather in San Francisco?")
    ];

    TokenizeRequest request = new TokenizeRequest(ChatModel.OpenAi.Gpt5.V5Mini, messages, [weatherTool]);
    TokenizeResult? result = await Program.ConnectMulti().Tokenize.CountTokens(request);

    Assert.That(result, Is.NotNull);
    Assert.That(result!.TotalTokens, Is.GreaterThan(0));

    Console.WriteLine($"OpenAI with tools tokenization: {result.TotalTokens} tokens");
}
235+
236+
/// <summary>
/// Counts tokens for a complete ChatRequest (messages + tool definitions) across several vendor models.
/// </summary>
[TornadoTest]
[TornadoTestCase("gpt-5-mini")]
[TornadoTestCase("claude-sonnet-4-20250514")]
[TornadoTestCase("gemini-2.5-flash")]
public static async Task TokenizeChatRequestWithTools(string model)
{
    // JSON-schema-shaped anonymous object describing the tool's single parameter.
    object weatherParameters = new
    {
        type = "object",
        properties = new
        {
            location = new
            {
                type = "string",
                description = "The city and state, e.g. San Francisco, CA"
            }
        },
        required = new[] { "location" }
    };

    ChatRequest chatRequest = new ChatRequest
    {
        Model = model,
        Messages =
        [
            new ChatMessage(ChatMessageRoles.System, "You are a helpful assistant with access to tools."),
            new ChatMessage(ChatMessageRoles.User, "What's the weather in San Francisco and New York?")
        ],
        Tools = [ new Tool(new ToolFunction("get_weather", "Get the weather for a location", weatherParameters)) ]
    };

    TokenizeResult? result = await Program.ConnectMulti().Tokenize.CountTokens(new TokenizeRequest(chatRequest));

    Assert.That(result, Is.NotNull);
    Assert.That(result!.TotalTokens, Is.GreaterThan(0));

    Console.WriteLine($"[{model}] ChatRequest with tools tokenization: {result.TotalTokens} tokens");
}
274+
275+
/// <summary>
/// Builds a TokenizeRequest from a Responses API request and asserts a positive token count.
/// </summary>
[TornadoTest]
public static async Task TokenizeOpenAiFromResponseRequest()
{
    ResponseRequest responseRequest = new ResponseRequest
    {
        Model = ChatModel.OpenAi.Gpt5.V5Mini,
        Instructions = "You are a helpful assistant.",
        InputString = "What is the meaning of life?"
    };

    TokenizeResult? result = await Program.ConnectMulti().Tokenize.CountTokens(new TokenizeRequest(responseRequest));

    Assert.That(result, Is.NotNull);
    Assert.That(result!.TotalTokens, Is.GreaterThan(0));

    Console.WriteLine($"OpenAI from ResponseRequest tokenization: {result.TotalTokens} tokens");
}
168293
}
169294

src/LlmTornado/Chat/Models/OpenAi/ChatModelOpenAiGpt5.cs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -132,7 +132,7 @@ public class ChatModelOpenAiGpt5 : IVendorModelClassProvider
132132
public static List<IModel> ModelsAll => LazyModelsAll.Value;
133133

134134
private static readonly Lazy<List<IModel>> LazyModelsAll = new Lazy<List<IModel>>(() => [
135-
ModelV5, ModelV5Mini, ModelV5Nano, ModelV5Codex, ModelV5Pro, ModelAudioMini, ModelAudio15, ModelRealtimeMini
135+
ModelV5, ModelV5Mini, ModelV5Nano, ModelV5Codex, ModelV5Pro, ModelAudio, ModelAudioMini, ModelAudio15, ModelRealtimeMini
136136
]);
137137

138138
/// <summary>

src/LlmTornado/Responses/ResponseHelpers.cs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -556,7 +556,7 @@ public static ChatChoice ToChatChoice(ResponseResult response, ResponseRequest r
556556
return null;
557557
}
558558

559-
private static List<ResponseTool> ConvertTools(List<Tool> tools)
559+
internal static List<ResponseTool> ConvertTools(List<Tool> tools)
560560
{
561561
List<ResponseTool> result = new List<ResponseTool>(tools.Count);
562562

src/LlmTornado/Responses/ResponseRequest.cs

Lines changed: 49 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -290,4 +290,53 @@ public ResponseRequest(ChatModel model, List<ResponseInputItem> inputItems)
290290
public ResponseRequest()
291291
{
292292
}
293+
/// <summary>
/// Creates a shallow copy of the given request, optionally stripping generation-only fields.
/// </summary>
/// <param name="basedOn">Request whose values are copied (shallow — reference-typed members are shared).</param>
/// <param name="forTokenization">
/// When true, Stream, StreamOptions, Store and Background are left null so the copy
/// is suitable for token counting rather than generation.
/// </param>
internal ResponseRequest(ResponseRequest basedOn, bool forTokenization = false)
{
    // Content & routing.
    Model = basedOn.Model;
    Instructions = basedOn.Instructions;
    InputString = basedOn.InputString;
    InputItems = basedOn.InputItems;
    Conversation = basedOn.Conversation;
    PreviousResponseId = basedOn.PreviousResponseId;
    Prompt = basedOn.Prompt;
    Include = basedOn.Include;
    Metadata = basedOn.Metadata;

    // Sampling, limits & output shaping.
    MaxOutputTokens = basedOn.MaxOutputTokens;
    MaxToolCalls = basedOn.MaxToolCalls;
    Temperature = basedOn.Temperature;
    TopP = basedOn.TopP;
    TopLogprobs = basedOn.TopLogprobs;
    Truncation = basedOn.Truncation;
    Reasoning = basedOn.Reasoning;
    Text = basedOn.Text;
    Verbosity = basedOn.Verbosity;
    ContextManagement = basedOn.ContextManagement;

    // Tools.
    Tools = basedOn.Tools;
    ToolChoice = basedOn.ToolChoice;
    ParallelToolCalls = basedOn.ParallelToolCalls;

    // Infrastructure, caching & attribution.
    ServiceTier = basedOn.ServiceTier;
    PromptCacheKey = basedOn.PromptCacheKey;
    PromptCacheRetention = basedOn.PromptCacheRetention;
    SafetyIdentifier = basedOn.SafetyIdentifier;
    User = basedOn.User;

    // Generation-only fields: blanked for tokenization, copied otherwise.
    Stream = forTokenization ? null : basedOn.Stream;
    StreamOptions = forTokenization ? null : basedOn.StreamOptions;
    Store = forTokenization ? null : basedOn.Store;
    Background = forTokenization ? null : basedOn.Background;
}
293342
}

0 commit comments

Comments
 (0)