@@ -300,4 +300,75 @@ describe("VsCodeLmHandler", () => {
 			await expect(promise).rejects.toThrow("VSCode LM completion error: Completion failed")
 		})
 	})
+
+	describe("countTokens", () => {
+		beforeEach(() => {
+			const mockModel = { ...mockLanguageModelChat }
+			;(vscode.lm.selectChatModels as Mock).mockResolvedValueOnce([mockModel])
+
+			// Override the default client with our test client
+			handler["client"] = mockLanguageModelChat
+			// Set up cancellation token
+			handler["currentRequestCancellation"] = new vscode.CancellationTokenSource()
+		})
+
+		it("should count tokens for string input", async () => {
+			mockLanguageModelChat.countTokens.mockResolvedValue(10)
+
+			const result = await handler.countTokens([{ type: "text", text: "Hello world" }])
+
+			expect(result).toBe(10)
+			expect(mockLanguageModelChat.countTokens).toHaveBeenCalledWith("Hello world", expect.any(Object))
+		})
+
+		it("should handle special case when LanguageModelChatMessage returns tokenCount of 4", async () => {
+			// First call returns 4 (triggering the special case)
+			// Second call returns the actual count after string conversion
+			mockLanguageModelChat.countTokens.mockResolvedValueOnce(4).mockResolvedValueOnce(25)
+
+			// Use the mocked vscode.LanguageModelChatMessage.User to create a proper message
+			const mockMessage = vscode.LanguageModelChatMessage.User("This is a test message")
+
+			const result = await handler["internalCountTokens"](mockMessage)
+
+			expect(result).toBe(25)
+			expect(mockLanguageModelChat.countTokens).toHaveBeenCalledTimes(2)
+			// First call with the message object
+			expect(mockLanguageModelChat.countTokens).toHaveBeenNthCalledWith(1, mockMessage, expect.any(Object))
+			// Second call with the extracted string
+			expect(mockLanguageModelChat.countTokens).toHaveBeenNthCalledWith(
+				2,
+				"This is a test message",
+				expect.any(Object),
+			)
+		})
+
+		it("should not recalculate when tokenCount is not 4", async () => {
+			mockLanguageModelChat.countTokens.mockResolvedValue(10)
+
+			// Use the mocked vscode.LanguageModelChatMessage.User to create a proper message
+			const mockMessage = vscode.LanguageModelChatMessage.User("This is a test message")
+
+			const result = await handler["internalCountTokens"](mockMessage)
+
+			expect(result).toBe(10)
+			expect(mockLanguageModelChat.countTokens).toHaveBeenCalledTimes(1)
+		})
+
+		it("should handle image blocks", async () => {
+			// The countTokens method converts to string, so it won't trigger the special case
+			mockLanguageModelChat.countTokens.mockResolvedValue(7)
+
+			const result = await handler.countTokens([
+				{ type: "text", text: "Hello" },
+				{ type: "image", source: { type: "base64", media_type: "image/png", data: "base64data" } },
+				{ type: "text", text: " world" },
+			])
+
+			expect(result).toBe(7)
+			// Should only be called once since it's a string, not a LanguageModelChatMessage
+			expect(mockLanguageModelChat.countTokens).toHaveBeenCalledTimes(1)
+			expect(mockLanguageModelChat.countTokens).toHaveBeenCalledWith("Hello[IMAGE] world", expect.any(Object))
+		})
+	})
 })