@@ -11,7 +11,9 @@ import { Client } from "@modelcontextprotocol/sdk/client/index.js";
 import { ModelManager } from "./models/index.js";
 import { imageToBase64 } from "./utils/image.js";
 import logger from "./utils/logger.js";
-import { iQueryInput, iStreamMessage } from "./utils/types.js";
+import { iQueryInput, iStreamMessage, ModelSettings } from "./utils/types.js";
+import { openAIConvertToGeminiTools } from "./utils/toolHandler.js";
+import { ToolDefinition } from "@langchain/core/language_models/base";
 
 // Map to store abort controllers
 export const abortControllerMap = new Map<string, AbortController>();
@@ -31,7 +33,7 @@ interface TokenUsage {
 
 export async function handleProcessQuery(
   toolToClientMap: Map<string, Client>,
-  availableTools: BindToolsInput[],
+  availableTools: ToolDefinition[],
   model: BaseChatModel | null,
   input: string | iQueryInput,
   history: BaseMessage[],
@@ -121,12 +123,16 @@ export async function handleProcessQuery(
 
   let hasToolCalls = true;
 
-  const runModel = modelManager.enableTools ? model.bindTools?.(availableTools) || model : model;
+  const tools = currentModelSettings?.modelProvider === "google-genai" ? openAIConvertToGeminiTools(availableTools) : availableTools;
+
+  const runModel = modelManager.enableTools ? model.bindTools?.(tools) || model : model;
 
   const isOllama = currentModelSettings?.modelProvider === "ollama";
   const isDeepseek =
     currentModelSettings?.configuration?.baseURL?.toLowerCase().includes("deepseek") ||
     currentModelSettings?.model?.toLowerCase().includes("deepseek");
+  const isMistralai = currentModelSettings?.modelProvider === "mistralai";
+  const isBedrock = currentModelSettings?.modelProvider === "bedrock";
 
   logger.debug(`[${chatId}] Start to process LLM query`);
 
@@ -141,7 +147,7 @@ export async function handleProcessQuery(
     try {
       // Track token usage if available
       for await (const chunk of stream) {
-        caculateTokenUsage(tokenUsage, chunk, currentModelSettings!.modelProvider!);
+        caculateTokenUsage(tokenUsage, chunk, currentModelSettings!);
 
         if (chunk.content) {
           let chunkMessage = "";
@@ -235,6 +241,8 @@ export async function handleProcessQuery(
       throw error;
     }
 
+    logger.debug(`[${chatId}] Chunk collected`);
+
     // filter empty tool calls
     toolCalls = toolCalls.filter((call) => call);
 
@@ -247,6 +255,7 @@ export async function handleProcessQuery(
       break;
     }
 
+    logger.debug(`[${chatId}] Tool calls: ${JSON.stringify(toolCalls, null, 2)}`);
     // support anthropic multiple tool calls version but other not sure
     messages.push(
       new AIMessage({
@@ -257,14 +266,23 @@ export async function handleProcessQuery(
             text: currentContent || ".",
           },
           // Deepseek will recursive when tool_use exist in content
-          ...(isDeepseek
+          ...(isDeepseek || isMistralai || isBedrock
             ? []
-            : toolCalls.map((toolCall) => ({
+            : toolCalls.map((toolCall) => {
+                let parsedArgs = {}
+                try {
+                  parsedArgs = toolCall.function.arguments === "" ? {} : JSON.parse(toolCall.function.arguments);
+                } catch (error) {
+                  toolCall.function.arguments = "{}";
+                  logger.error(`[${chatId}] Error parsing tool call ${toolCall.function.name} args: ${error}`);
+                }
+                return {
                   type: "tool_use",
                   id: toolCall.id,
                   name: toolCall.function.name,
-                  input: toolCall.function.arguments === "" ? {} : JSON.parse(toolCall.function.arguments),
-                })),
+                  input: parsedArgs,
+                }
+              })),
         ],
         additional_kwargs: {
           tool_calls: toolCalls.map((toolCall) => ({
@@ -297,6 +315,8 @@ export async function handleProcessQuery(
       );
     }
 
+    logger.debug(`[${chatId}] Tool calls collected`);
+
     // Execute all tool calls in parallel
     const toolResults = await Promise.all(
       toolCalls.map(async (toolCall) => {
@@ -407,10 +427,14 @@ export async function handleProcessQuery(
       })
     );
 
+    logger.debug(`[${chatId}] Tool results collected`);
+
     // Add tool results to conversation
     if (toolResults.length > 0) {
       messages.push(...toolResults.map((result) => new ToolMessage(result)));
     }
+
+    logger.debug(`[${chatId}] Messages collected and ready to next round`);
   }
 
   // Log token usage at the end of processing
@@ -437,8 +461,20 @@ export async function handleProcessQuery(
   }
 }
 
-function caculateTokenUsage(tokenUsage: TokenUsage, chunk: AIMessageChunk, currentModelProvider: string) {
-  switch (currentModelProvider) {
+function caculateTokenUsage(tokenUsage: TokenUsage, chunk: AIMessageChunk, currentModelSettings: ModelSettings) {
+  if (!currentModelSettings) {
+    return;
+  }
+
+  if (currentModelSettings.configuration?.baseURL?.toLowerCase().includes("silicon")) {
+    const usage = chunk.response_metadata.usage;
+    tokenUsage.totalInputTokens = usage?.prompt_tokens || 0;
+    tokenUsage.totalOutputTokens = usage?.completion_tokens || 0;
+    tokenUsage.totalTokens = usage?.total_tokens || 0;
+    return;
+  }
+
+  switch (currentModelSettings.modelProvider) {
     case "openai":
       if (chunk.response_metadata?.usage) {
         const usage = chunk.response_metadata.usage;
@@ -455,7 +491,8 @@ function caculateTokenUsage(tokenUsage: TokenUsage, chunk: AIMessageChunk, curre
         tokenUsage.totalOutputTokens += usage?.output_tokens || 0;
         tokenUsage.totalTokens += usage?.total_tokens || 0;
       }
+      break;
     default:
       break;
   }
-}
+}
0 commit comments