@@ -349,13 +349,170 @@ def _format_ollama_tool_result_message(self, function_name: str, tool_result: An
349349 "content" : f"The { function_name } function returned: { tool_result_str } "
350350 }
351351
352+ def _analyze_tool_chain_context (self , tool_results : List [Any ], response_text : str ) -> dict :
353+ """
354+ Analyze tool execution context to understand the current state of the tool chain.
355+
356+ Returns a context dictionary with confidence scores and patterns.
357+ """
358+ context = {
359+ 'tool_count' : len (tool_results ) if tool_results else 0 ,
360+ 'has_sequential_tools' : False ,
361+ 'has_final_computation' : False ,
362+ 'has_data_retrieval' : False ,
363+ 'confidence_score' : 0.0 ,
364+ 'tool_names' : [],
365+ 'tool_success_rate' : 1.0
366+ }
367+
368+ if not tool_results :
369+ return context
370+
371+ # Extract tool names and analyze patterns
372+ successful_tools = 0
373+ for result in tool_results :
374+ if isinstance (result , dict ) and 'function_name' in result :
375+ tool_name = result ['function_name' ].lower ()
376+ context ['tool_names' ].append (tool_name )
377+
378+ # Check if tool execution was successful
379+ if 'error' not in result or not result .get ('error' ):
380+ successful_tools += 1
381+
382+ # Analyze tool types for patterns
383+ if any (pattern in tool_name for pattern in ['get' , 'fetch' , 'search' , 'retrieve' , 'find' ]):
384+ context ['has_data_retrieval' ] = True
385+
386+ if any (pattern in tool_name for pattern in ['calculate' , 'compute' , 'multiply' , 'add' , 'sum' , 'process' ]):
387+ context ['has_final_computation' ] = True
388+
389+ # Calculate success rate
390+ if tool_results :
391+ context ['tool_success_rate' ] = successful_tools / len (tool_results )
392+
393+ # Detect sequential tool usage patterns
394+ if len (set (context ['tool_names' ])) > 1 :
395+ context ['has_sequential_tools' ] = True
396+
397+ # Calculate confidence score based on tool chain analysis
398+ confidence = 0.0
399+
400+ # Sequential tools with final computation suggest completion
401+ if context ['has_sequential_tools' ] and context ['has_final_computation' ]:
402+ confidence += 0.4
403+
404+ # Data retrieval followed by processing
405+ if context ['has_data_retrieval' ] and context ['has_final_computation' ]:
406+ confidence += 0.3
407+
408+ # High success rate adds confidence
409+ confidence += context ['tool_success_rate' ] * 0.2
410+
411+ # Multiple tools executed successfully
412+ if context ['tool_count' ] >= 2 and context ['tool_success_rate' ] > 0.8 :
413+ confidence += 0.1
414+
415+ context ['confidence_score' ] = min (confidence , 1.0 )
416+ return context
417+
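For illustration, a minimal sketch of how this heuristic scores a two-step chain, assuming llm is an instance of this class and that each tool result is a dict carrying the function_name key the loop above expects:

# Hypothetical results from a retrieval tool followed by a computation tool.
results = [
    {'function_name': 'search_stock_price', 'result': '152.4'},
    {'function_name': 'calculate_total', 'result': 304.8},
]
ctx = llm._analyze_tool_chain_context(results, "The total is 304.8")
# 0.4 (sequential tools + computation) + 0.3 (retrieval feeding computation)
# + 0.2 (success rate 1.0) + 0.1 (two successful tools) -> confidence_score == 1.0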
+    def _assess_response_quality(self, response_text: str, tool_results: List[Any]) -> dict:
+        """
+        Assess the quality and completeness of a response based on content analysis.
+
+        Returns quality metrics and confidence scores.
+        """
+        quality = {
+            'length': len(response_text.strip()) if response_text else 0,
+            'has_tool_references': False,
+            'has_conclusion_indicators': False,
+            'contains_results': False,
+            'quality_score': 0.0
+        }
+
+        if not response_text:
+            return quality
+
+        response_lower = response_text.lower().strip()
+
+        # Check for tool result integration
+        if tool_results:
+            tool_result_strings = [str(result) for result in tool_results if result]
+            for tool_result in tool_result_strings:
+                if tool_result and any(part in response_lower for part in str(tool_result).lower().split() if len(part) > 3):
+                    quality['has_tool_references'] = True
+                    break
+
+        # Check for conclusion indicators (dynamic pattern matching)
+        conclusion_indicators = ['therefore', 'so', 'result', 'answer', 'conclusion', 'final', 'total', 'summary']
+        quality['has_conclusion_indicators'] = any(indicator in response_lower for indicator in conclusion_indicators)
+
+        # Check if response contains actual results/data
+        if any(char.isdigit() for char in response_text) or '$' in response_text:
+            quality['contains_results'] = True
+
+        # Calculate quality score
+        score = 0.0
+
+        # Response length contributes to quality
+        if quality['length'] > 20:
+            score += 0.2
+        if quality['length'] > 50:
+            score += 0.1
+        if quality['length'] > 100:
+            score += 0.1
+
+        # Content quality indicators
+        if quality['has_tool_references']:
+            score += 0.3
+        if quality['has_conclusion_indicators']:
+            score += 0.2
+        if quality['contains_results']:
+            score += 0.1
+
+        quality['quality_score'] = min(score, 1.0)
+        return quality
+
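A rough worked example of the scoring, using the same hypothetical llm instance; the tool result is kept as a plain string so its words can be matched back into the answer:

answer = "Therefore, the total price for both items comes to $42 in total."
quality = llm._assess_response_quality(answer, ["price of item: $21"])
# length 64 -> 0.3, 'price' echoed from the tool result -> 0.3,
# 'therefore'/'total' -> 0.2, digits and '$' present -> 0.1  => quality_score == 0.9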
+    def _should_generate_tool_summary(self, tool_results: List[Any], response_text: str, iteration_count: int) -> bool:
+        """
+        Dynamically determine if a tool summary should be generated based on context analysis.
+
+        This replaces the hardcoded iteration_count >= 5 check with intelligent analysis.
+        """
+        # Analyze tool execution context
+        tool_context = self._analyze_tool_chain_context(tool_results, response_text)
+
+        # Assess response quality
+        response_quality = self._assess_response_quality(response_text, tool_results)
+
+        # Decision logic based on dynamic analysis
+        should_generate = False
+
+        # High confidence that tool chain is complete
+        if tool_context['confidence_score'] >= 0.7:
+            should_generate = True
+
+        # Good tool chain with quality response
+        elif tool_context['confidence_score'] >= 0.5 and response_quality['quality_score'] >= 0.6:
+            should_generate = True
+
+        # Sequential tools with final computation and good response
+        elif (tool_context['has_sequential_tools'] and
+              tool_context['has_final_computation'] and
+              response_quality['quality_score'] >= 0.4):
+            should_generate = True
+
+        # Safety fallback - prevent infinite loops (increased threshold)
+        elif iteration_count >= 7:
+            should_generate = True
+
+        return should_generate
+
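Putting the two analyses together, a hedged sketch of the decision for a complete-looking chain versus an empty one (llm remains a hypothetical instance of this class):

chain = [{'function_name': 'search_stock_price', 'result': '152.4'},
         {'function_name': 'calculate_total', 'result': 304.8}]
llm._should_generate_tool_summary(chain, "The total is 304.8", 2)     # True: confidence 1.0 >= 0.7
llm._should_generate_tool_summary([], "Still working on it...", 3)    # False: no branch fires yet
llm._should_generate_tool_summary([], "Still working on it...", 7)    # True: safety fallback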
     def _is_final_answer(self, response_text: str, has_tool_calls: bool, tool_results: List[Any]) -> bool:
         """
         Determine if a response is a final answer or intermediate acknowledgment.
 
-        This method provides intelligent differentiation between:
-        - Intermediate responses that acknowledge tool execution
-        - Final responses that contain actual answers to user queries
+        This method provides intelligent differentiation using dynamic analysis
+        instead of hardcoded patterns.
 
         Args:
             response_text: The text response from the LLM
@@ -368,40 +525,31 @@ def _is_final_answer(self, response_text: str, has_tool_calls: bool, tool_result
         if not response_text or not response_text.strip():
             return False
 
-        response_lower = response_text.lower().strip()
-
         # If response contains tool calls, it's likely not a final answer
         if has_tool_calls:
             return False
+
+        # For Ollama, use dynamic analysis instead of hardcoded patterns
+        if self._is_ollama_provider() and tool_results:
+            # Analyze tool chain context
+            tool_context = self._analyze_tool_chain_context(tool_results, response_text)
 
-        # For Ollama, be more conservative about what constitutes a final answer
-        if self._is_ollama_provider():
-            # If we have recent tool results, check if this is just acknowledgment
-            if tool_results:
-                # Common patterns of tool acknowledgment (not final answers)
-                acknowledgment_patterns = [
-                    "i'll", "let me", "now i'll", "next i'll", "i need to", "i should",
-                    "executing", "calling", "running", "using the", "based on this",
-                    "now let me", "let me now", "i will now", "proceeding to",
-                    "moving to", "continuing with", "next step", "now that i have",
-                    "tool executed", "function called", "result obtained", "got the result"
-                ]
-
-                # Check if response is primarily acknowledgment
-                if any(pattern in response_lower for pattern in acknowledgment_patterns):
-                    # If it's short and contains acknowledgment patterns, likely intermediate
-                    if len(response_text.strip()) < 50:
-                        return False
-
-                # If response is very short and we have tool results, likely intermediate
-                if len(response_text.strip()) < 30:
-                    return False
-
-                # Additional check: if response mainly contains status updates or simple confirmations
-                status_patterns = ["done", "completed", "finished", "successful", "ok", "ready"]
-                if (len(response_text.strip()) < 40 and
-                    any(pattern in response_lower for pattern in status_patterns)):
-                    return False
+            # Assess response quality
+            response_quality = self._assess_response_quality(response_text, tool_results)
+
+            # Dynamic decision based on context and quality
+            # If we have a complete tool chain with quality response, it's likely final
+            if (tool_context['confidence_score'] >= 0.6 and
+                    response_quality['quality_score'] >= 0.5):
+                return True
+
+            # If response is very short and we have tool results, likely intermediate
+            if response_quality['length'] < 20:
+                return False
+
+            # If response doesn't reference tool results, likely intermediate
+            if not response_quality['has_tool_references'] and response_quality['length'] < 80:
+                return False
 
         # For other providers, maintain existing behavior
         # Substantial content (>10 chars) is considered final
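A small illustration of the reworked Ollama branch, assuming _is_ollama_provider() returns True and no tool calls are pending: a substantive answer clears the confidence/quality gate, while a terse acknowledgment is still treated as intermediate.

chain = [{'function_name': 'search_stock_price', 'result': '152.4'},
         {'function_name': 'calculate_total', 'result': 304.8}]
llm._is_final_answer("Therefore, the combined value across both holdings comes to a total of 304.8 dollars.", False, chain)  # True
llm._is_final_answer("Let me check.", False, chain)  # False: length < 20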
@@ -1167,8 +1315,8 @@ def get_response(
                         break
 
                 # Special handling for Ollama to prevent infinite loops
-                # Only generate summary if we're approaching max iterations or stuck in a loop
-                if self._is_ollama_provider() and iteration_count >= 5:
+                # Use dynamic analysis instead of hardcoded iteration count
+                if self._is_ollama_provider() and self._should_generate_tool_summary(tool_results, response_text, iteration_count):
                     tool_summary = self._generate_ollama_tool_summary(tool_results, response_text)
                     if tool_summary:
                         final_response_text = tool_summary
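For context, a much-simplified sketch of where this condition sits; the loop shape and the run_llm_and_tools helper are illustrative assumptions, not the actual get_response body:

while iteration_count < max_iterations:
    response_text, tool_results = run_llm_and_tools()  # hypothetical helper
    # The condition can now trigger on any iteration where the chain already looks
    # complete, instead of only once iteration_count reaches the old fixed threshold.
    if self._is_ollama_provider() and self._should_generate_tool_summary(
            tool_results, response_text, iteration_count):
        tool_summary = self._generate_ollama_tool_summary(tool_results, response_text)
        if tool_summary:
            final_response_text = tool_summary
            break
    iteration_count += 1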
@@ -1918,8 +2066,8 @@ async def get_response_async(
                         break
 
                 # Special handling for Ollama to prevent infinite loops
-                # Only generate summary if we're approaching max iterations or stuck in a loop
-                if self._is_ollama_provider() and iteration_count >= 5:
+                # Use dynamic analysis instead of hardcoded iteration count
+                if self._is_ollama_provider() and self._should_generate_tool_summary(tool_results, response_text, iteration_count):
                     tool_summary = self._generate_ollama_tool_summary(tool_results, response_text)
                     if tool_summary:
                         final_response_text = tool_summary