Commit af07784

Merge pull request #943 from MervinPraison/claude/issue-940-20250715-2304
fix: Resolve Ollama infinite tool call loops by improving response handling
2 parents: c2d9a9b + ca62af0
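
For context, the failure mode this commit addresses shows up with tool-using agents on Ollama-served models. The sketch below assumes the praisonaiagents `Agent` API; the tool, model, and prompt are illustrative and not part of this commit:

```python
# Hypothetical reproduction sketch (tool, model, and prompt are illustrative).
from praisonaiagents import Agent

def get_stock_price(company: str) -> str:
    """Toy tool that returns a canned stock price."""
    return f"The stock price of {company} is 100"

agent = Agent(
    instructions="You are a helpful assistant. Use tools when needed.",
    llm="ollama/qwen3",        # any Ollama-served model
    tools=[get_stock_price],
)

# Before this fix, Ollama could follow the tool call with an empty response,
# so the agent kept re-issuing tool calls; now a summary is synthesized instead.
agent.start("What is the stock price of Google?")
```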

File tree

2 files changed: +233 additions, -0 deletions


src/praisonai-agents/praisonaiagents/llm/llm.py

Lines changed: 48 additions & 0 deletions
```diff
@@ -302,6 +302,42 @@ def _is_ollama_provider(self) -> bool:
 
         return False
 
+    def _generate_ollama_tool_summary(self, tool_results: List[Any], response_text: str) -> Optional[str]:
+        """
+        Generate a summary from tool results for Ollama to prevent infinite loops.
+
+        This prevents infinite loops where Ollama provides an empty response after a
+        tool call, expecting the user to prompt for a summary.
+
+        Args:
+            tool_results: The list of results from tool execution.
+            response_text: The text response from the LLM.
+
+        Returns:
+            A summary string if conditions are met, otherwise None.
+        """
+        # Constant for minimal response length check
+        OLLAMA_MIN_RESPONSE_LENGTH = 10
+
+        # Only generate summary for Ollama with tool results
+        if not (self._is_ollama_provider() and tool_results):
+            return None
+
+        # If response is substantial, no summary needed
+        if response_text and len(response_text.strip()) > OLLAMA_MIN_RESPONSE_LENGTH:
+            return None
+
+        # Build tool summary efficiently
+        summary_lines = ["Based on the tool execution results:"]
+        for i, result in enumerate(tool_results):
+            if isinstance(result, dict) and 'result' in result:
+                function_name = result.get('function_name', 'Tool')
+                summary_lines.append(f"- {function_name}: {result['result']}")
+            else:
+                summary_lines.append(f"- Tool {i+1}: {result}")
+
+        return "\n".join(summary_lines)
+
     def _format_ollama_tool_result_message(self, function_name: str, tool_result: Any) -> Dict[str, str]:
         """
         Format tool result message for Ollama provider.
```
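
To make the output shape concrete, here is a small illustrative call. The tool results below are invented, but the formatting follows the two branches above: dicts with a `'result'` key are labeled with their `function_name`, and everything else falls back to a positional label.

```python
# Illustrative input/output for _generate_ollama_tool_summary (values invented).
tool_results = [
    {"function_name": "get_stock_price", "result": "Google: 100"},  # dict branch
    200,                                                             # fallback branch
]
summary = llm._generate_ollama_tool_summary(tool_results, response_text="")
# summary == ("Based on the tool execution results:\n"
#             "- get_stock_price: Google: 100\n"
#             "- Tool 2: 200")
```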
```diff
@@ -1072,6 +1108,12 @@ def get_response(
                 final_response_text = response_text.strip()
                 break
 
+                # Special handling for Ollama to prevent infinite loops
+                tool_summary = self._generate_ollama_tool_summary(tool_results, response_text)
+                if tool_summary:
+                    final_response_text = tool_summary
+                    break
+
                 # Otherwise, continue the loop to check if more tools are needed
                 iteration_count += 1
                 continue
```
```diff
@@ -1815,6 +1857,12 @@ async def get_response_async(
                 final_response_text = response_text.strip()
                 break
 
+                # Special handling for Ollama to prevent infinite loops
+                tool_summary = self._generate_ollama_tool_summary(tool_results, response_text)
+                if tool_summary:
+                    final_response_text = tool_summary
+                    break
+
                 # Continue the loop to check if more tools are needed
                 iteration_count += 1
                 continue
```
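
Both hunks add the same guard, once in the synchronous `get_response` path and once in `get_response_async`. Conceptually, it slots into the tool-call loop like this simplified sketch; `run_llm_and_tools` is an invented stand-in, not the real method body:

```python
# Simplified sketch of the loop shape (run_llm_and_tools is hypothetical).
while iteration_count < max_iterations:
    response_text, tool_results = run_llm_and_tools(messages)

    # Normal exit: the model produced a real answer and needs no more tools.
    if response_text and not tool_results:
        final_response_text = response_text.strip()
        break

    # New guard: if an Ollama model followed tool calls with an empty or
    # near-empty reply, synthesize an answer from the tool results instead
    # of iterating (and re-calling tools) forever.
    tool_summary = self._generate_ollama_tool_summary(tool_results, response_text)
    if tool_summary:
        final_response_text = tool_summary
        break

    iteration_count += 1
```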

test_ollama_fix.py

Lines changed: 185 additions & 0 deletions
```python
#!/usr/bin/env python3
"""
Test script to validate the Ollama infinite loop fix.

This script tests that:
1. Ollama provider detection works correctly
2. Tool results summary generation works as expected
3. Loop termination logic prevents infinite loops
4. Backward compatibility is maintained for other providers
"""

import sys
import os

# Add the source directory to the path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'src', 'praisonai-agents'))

def test_module_imports():
    """Test that we can import the required modules."""
    try:
        from praisonaiagents.llm.llm import LLM
        print("✅ Successfully imported LLM module")
        return True
    except ImportError as e:
        print(f"❌ Failed to import LLM module: {e}")
        return False

def test_ollama_provider_detection():
    """Test Ollama provider detection logic."""
    try:
        from praisonaiagents.llm.llm import LLM

        # Test Ollama provider detection
        ollama_llm = LLM(model="ollama/qwen3")
        is_ollama = ollama_llm._is_ollama_provider()

        if is_ollama:
            print("✅ Ollama provider detection works correctly")
        else:
            print("❌ Ollama provider detection failed")
            return False

        # Test non-Ollama provider
        openai_llm = LLM(model="gpt-4o-mini")
        is_not_ollama = not openai_llm._is_ollama_provider()

        if is_not_ollama:
            print("✅ Non-Ollama provider detection works correctly")
        else:
            print("❌ Non-Ollama provider incorrectly detected as Ollama")
            return False

        return True

    except Exception as e:
        print(f"❌ Provider detection test failed: {e}")
        return False

def test_tool_summary_generation():
    """Test that tool results summary generation works correctly by calling production code."""
    try:
        from praisonaiagents.llm.llm import LLM

        # Create an Ollama LLM instance
        ollama_llm = LLM(model="ollama/test")

        # Mock tool results like what would be generated
        tool_results = [
            "The stock price of Google is 100",
            200
        ]

        # Test with empty response (should generate summary)
        summary = ollama_llm._generate_ollama_tool_summary(tool_results, "")
        expected_summary = "Based on the tool execution results:\n- Tool 1: The stock price of Google is 100\n- Tool 2: 200"

        if summary == expected_summary:
            print("✅ Tool summary generation (empty response) works correctly")
        else:
            print("❌ Tool summary generation (empty response) failed")
            print(f"Expected: {repr(expected_summary)}")
            print(f"Got: {repr(summary)}")
            return False

        # Test with minimal response (should generate summary)
        summary_minimal = ollama_llm._generate_ollama_tool_summary(tool_results, "ok")
        if summary_minimal == expected_summary:
            print("✅ Tool summary generation (minimal response) works correctly")
        else:
            print("❌ Tool summary generation (minimal response) failed")
            return False

        # Test with substantial response (should NOT generate summary)
        summary_substantial = ollama_llm._generate_ollama_tool_summary(tool_results, "This is a detailed response with more than 10 characters")
        if summary_substantial is None:
            print("✅ Tool summary generation correctly skips substantial responses")
        else:
            print("❌ Tool summary generation incorrectly generated summary for substantial response")
            return False

        # Test with non-Ollama model (should NOT generate summary)
        non_ollama_llm = LLM(model="gpt-4o-mini")
        summary_non_ollama = non_ollama_llm._generate_ollama_tool_summary(tool_results, "")
        if summary_non_ollama is None:
            print("✅ Tool summary generation correctly skips non-Ollama models")
        else:
            print("❌ Tool summary generation incorrectly generated summary for non-Ollama model")
            return False

        return True

    except Exception as e:
        print(f"❌ Tool summary generation test failed: {e}")
        return False

def test_backward_compatibility():
    """Test that the fix doesn't break other LLM providers."""
    try:
        from praisonaiagents.llm.llm import LLM

        # Test that non-Ollama providers aren't affected
        models_to_test = [
            "gpt-4o-mini",
            "claude-3-sonnet",
            "gemini/gemini-2.5-pro"
        ]

        for model in models_to_test:
            try:
                llm = LLM(model=model)
                is_ollama = llm._is_ollama_provider()
                if not is_ollama:
                    print(f"✅ Model {model} correctly identified as non-Ollama")
                else:
                    print(f"❌ Model {model} incorrectly identified as Ollama")
                    return False
            except Exception as e:
                print(f"⚠️ Could not test model {model}: {e}")

        print("✅ Backward compatibility verified")
        return True

    except Exception as e:
        print(f"❌ Backward compatibility test failed: {e}")
        return False

def main():
    """Run all tests."""
    print("🚀 Testing Ollama infinite loop fix...")
    print("=" * 50)

    tests = [
        ("Module Imports", test_module_imports),
        ("Ollama Provider Detection", test_ollama_provider_detection),
        ("Tool Summary Generation", test_tool_summary_generation),
        ("Backward Compatibility", test_backward_compatibility),
    ]

    passed = 0
    total = len(tests)

    for test_name, test_func in tests:
        print(f"\n🧪 Running: {test_name}")
        try:
            if test_func():
                passed += 1
                print(f"✅ {test_name}: PASSED")
            else:
                print(f"❌ {test_name}: FAILED")
        except Exception as e:
            print(f"❌ {test_name}: ERROR - {e}")

    print("\n" + "=" * 50)
    print(f"📊 Test Results: {passed}/{total} tests passed")

    if passed == total:
        print("🎉 All tests passed! The Ollama fix is working correctly.")
        return True
    else:
        print("⚠️ Some tests failed. Please review the implementation.")
        return False

if __name__ == "__main__":
    success = main()
    sys.exit(0 if success else 1)
```
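
Since the script prepends `src/praisonai-agents` relative to its own location, it is presumably meant to be run from the repository root (for example, `python test_ollama_fix.py`); the exit status is 0 only when all four checks pass, so it can gate CI.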
