1+ #!/usr/bin/env python3
2+ """
3+ Test script to verify Ollama response overwrite fix
4+ """
5+
6+ import sys
7+ import os
8+
9+ # Add the source directory to the path
10+ sys .path .insert (0 , os .path .join (os .path .dirname (__file__ ), 'src' , 'praisonai-agents' ))
11+
def test_ollama_tool_summary_logic():
    """Exercise the fixed final_response_text handling for Ollama models.

    Returns True when the generated tool summary survives the
    post-iteration assignment, False on any failure (including an
    import error when the package is unavailable).
    """
    print("Testing Ollama tool summary logic...")

    try:
        from praisonaiagents.llm.llm import LLM

        # An Ollama-backed instance is required for summary generation.
        client = LLM(model="ollama/test")

        # Ollama frequently returns an empty string after tool calls.
        results = ["The stock price of Google is 100"]
        summary = client._generate_ollama_tool_summary(results, "")

        if not summary:
            print("❌ Tool summary generation failed")
            return False

        print(f"✅ Tool summary generated correctly: {repr(summary)} ")

        # Step 1 of the fix: seed final_response_text with the summary.
        final_text = summary
        print(f"✅ final_response_text set to: {repr(final_text)} ")

        # Step 2: re-check the guard that used to clobber the summary.
        iterations = 1   # simulate that tools were executed
        model_reply = ""  # simulate an empty Ollama response

        # OLD BUGGY LOGIC:
        # if iteration_count > 0:
        #     final_response_text = response_text.strip() if response_text else ""
        # This would overwrite final_response_text with ""

        # NEW FIXED LOGIC:
        if iterations > 0 and not final_text:
            final_text = model_reply.strip() if model_reply else ""
            print("❌ This condition should NOT execute because final_response_text is already set")
        else:
            print("✅ Condition correctly skipped - final_response_text preserved")

        # Step 3: the summary must have survived untouched.
        if final_text != summary:
            print(f"❌ FAILED: Tool summary was overwritten. Expected: {repr(summary)} , Got: {repr(final_text)} ")
            return False

        print("✅ SUCCESS: Tool summary preserved through the fix!")
        return True

    except Exception as e:
        print(f"❌ Test failed with error: {e} ")
        return False
66+
def test_non_ollama_compatibility():
    """Verify the Ollama fix leaves non-Ollama models untouched.

    Returns True when the summary helper yields None for a non-Ollama
    model and the normal response-assignment path still runs, False
    otherwise (including an import error when the package is missing).
    """
    print("\n Testing non-Ollama compatibility...")

    try:
        from praisonaiagents.llm.llm import LLM

        # A non-Ollama model must not produce an Ollama tool summary.
        client = LLM(model="gpt-3.5-turbo")

        results = ["The stock price of Google is 100"]
        reply = "Based on the tools, Google's stock price is 100"

        summary = client._generate_ollama_tool_summary(results, reply)

        if summary is not None:
            print(f"❌ Non-Ollama model incorrectly generated summary: {summary} ")
            return False

        print("✅ Non-Ollama model correctly returns None for tool summary")

        # With no summary, final_response_text starts empty, so the
        # post-iteration assignment is expected to execute.
        final_text = ""
        iterations = 1

        if iterations > 0 and not final_text:
            final_text = reply.strip() if reply else ""
            print("✅ Non-Ollama logic executed correctly")

        if final_text == reply:
            print("✅ SUCCESS: Non-Ollama compatibility maintained!")
            return True

        print(f"❌ FAILED: Non-Ollama logic broken. Expected: {repr(reply)} , Got: {repr(final_text)} ")
        return False

    except Exception as e:
        print(f"❌ Test failed with error: {e} ")
        return False
108+
def main():
    """Run both regression tests and report a combined pass/fail.

    Returns True only when every individual test passed.
    """
    banner = "=" * 60
    print(banner)
    print("Testing Ollama Response Overwrite Fix")
    print(banner)

    ollama_ok = test_ollama_tool_summary_logic()
    compat_ok = test_non_ollama_compatibility()

    print("\n " + banner)
    print("TEST RESULTS:")
    print(f"Ollama tool summary logic: {'✅ PASSED' if ollama_ok else '❌ FAILED'} ")
    print(f"Non-Ollama compatibility: {'✅ PASSED' if compat_ok else '❌ FAILED'} ")

    if ollama_ok and compat_ok:
        print("\n 🎉 ALL TESTS PASSED! The fix should resolve the issue.")
        return True
    print("\n 💥 SOME TESTS FAILED! The fix needs more work.")
    return False
129+
# Script entry point: exit status mirrors the overall test outcome
# (0 on success, 1 on failure) for use in CI pipelines.
if __name__ == "__main__":
    sys.exit(0 if main() else 1)