Skip to content

Commit a45618f

Browse files
fix: enable tool calling for async agents with Gemini models
- Created _build_system_prompt method to centralize system prompt generation
- Fixed async achat to use correct tools parameter (was always using self.tools)
- Both sync and async now use enhanced system prompt with tool information
- Added comprehensive async agent test to verify the fix

This resolves the issue where async agents would claim no internet access even when tools were available.

Co-authored-by: Mervin Praison <[email protected]>
1 parent 473c832 commit a45618f

File tree

3 files changed

+248
-41
lines changed

3 files changed

+248
-41
lines changed

src/praisonai-agents/praisonaiagents/agent/agent.py

Lines changed: 54 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -713,52 +713,69 @@ def _apply_guardrail_with_retry(self, response_text, prompt, temperature=0.2, to
713713
)
714714

715715
return current_response
716+
717+
def _build_system_prompt(self, tools=None):
718+
"""Build the system prompt with tool information.
719+
720+
Args:
721+
tools: Optional list of tools to use (defaults to self.tools)
722+
723+
Returns:
724+
str: The system prompt or None if use_system_prompt is False
725+
"""
726+
if not self.use_system_prompt:
727+
return None
728+
729+
system_prompt = f"""{self.backstory}\n
730+
Your Role: {self.role}\n
731+
Your Goal: {self.goal}"""
732+
733+
# Add tool usage instructions if tools are available
734+
# Use provided tools or fall back to self.tools
735+
tools_to_use = tools if tools is not None else self.tools
736+
if tools_to_use:
737+
tool_names = []
738+
for tool in tools_to_use:
739+
try:
740+
if callable(tool) and hasattr(tool, '__name__'):
741+
tool_names.append(tool.__name__)
742+
elif isinstance(tool, dict) and isinstance(tool.get('function'), dict) and 'name' in tool['function']:
743+
tool_names.append(tool['function']['name'])
744+
elif isinstance(tool, str):
745+
tool_names.append(tool)
746+
elif hasattr(tool, "to_openai_tool"):
747+
# Handle MCP tools
748+
openai_tools = tool.to_openai_tool()
749+
if isinstance(openai_tools, list):
750+
for t in openai_tools:
751+
if isinstance(t, dict) and 'function' in t and 'name' in t['function']:
752+
tool_names.append(t['function']['name'])
753+
elif isinstance(openai_tools, dict) and 'function' in openai_tools:
754+
tool_names.append(openai_tools['function']['name'])
755+
except (AttributeError, KeyError, TypeError) as e:
756+
logging.warning(f"Could not extract tool name from {tool}: {e}")
757+
continue
758+
759+
if tool_names:
760+
system_prompt += f"\n\nYou have access to the following tools: {', '.join(tool_names)}. Use these tools when appropriate to help complete your tasks. Always use tools when they can help provide accurate information or perform actions."
761+
762+
return system_prompt
716763

717-
def _build_messages(self, prompt, temperature=0.2, output_json=None, output_pydantic=None):
764+
def _build_messages(self, prompt, temperature=0.2, output_json=None, output_pydantic=None, tools=None):
718765
"""Build messages list for chat completion.
719766
720767
Args:
721768
prompt: The user prompt (str or list)
722769
temperature: Temperature for the chat
723770
output_json: Optional Pydantic model for JSON output
724771
output_pydantic: Optional Pydantic model for JSON output (alias)
772+
tools: Optional list of tools to use (defaults to self.tools)
725773
726774
Returns:
727775
tuple: (messages list, original prompt)
728776
"""
729-
# Build system prompt if enabled
730-
system_prompt = None
731-
if self.use_system_prompt:
732-
system_prompt = f"""{self.backstory}\n
733-
Your Role: {self.role}\n
734-
Your Goal: {self.goal}"""
735-
736-
# Add tool usage instructions if tools are available
737-
if self.tools:
738-
tool_names = []
739-
for tool in self.tools:
740-
try:
741-
if callable(tool) and hasattr(tool, '__name__'):
742-
tool_names.append(tool.__name__)
743-
elif isinstance(tool, dict) and isinstance(tool.get('function'), dict) and 'name' in tool['function']:
744-
tool_names.append(tool['function']['name'])
745-
elif isinstance(tool, str):
746-
tool_names.append(tool)
747-
elif hasattr(tool, "to_openai_tool"):
748-
# Handle MCP tools
749-
openai_tools = tool.to_openai_tool()
750-
if isinstance(openai_tools, list):
751-
for t in openai_tools:
752-
if isinstance(t, dict) and 'function' in t and 'name' in t['function']:
753-
tool_names.append(t['function']['name'])
754-
elif isinstance(openai_tools, dict) and 'function' in openai_tools:
755-
tool_names.append(openai_tools['function']['name'])
756-
except (AttributeError, KeyError, TypeError) as e:
757-
logging.warning(f"Could not extract tool name from {tool}: {e}")
758-
continue
759-
760-
if tool_names:
761-
system_prompt += f"\n\nYou have access to the following tools: {', '.join(tool_names)}. Use these tools when appropriate to help complete your tasks. Always use tools when they can help provide accurate information or perform actions."
777+
# Build system prompt using the helper method
778+
system_prompt = self._build_system_prompt(tools)
762779

763780
# Use openai_client's build_messages method if available
764781
if self._openai_client is not None:
@@ -1202,7 +1219,7 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
12021219
# Pass everything to LLM class
12031220
response_text = self.llm_instance.get_response(
12041221
prompt=prompt,
1205-
system_prompt=f"{self.backstory}\n\nYour Role: {self.role}\n\nYour Goal: {self.goal}" if self.use_system_prompt else None,
1222+
system_prompt=self._build_system_prompt(tools),
12061223
chat_history=self.chat_history,
12071224
temperature=temperature,
12081225
tools=tool_param,
@@ -1518,7 +1535,7 @@ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None
15181535
try:
15191536
response_text = await self.llm_instance.get_response_async(
15201537
prompt=prompt,
1521-
system_prompt=f"{self.backstory}\n\nYour Role: {self.role}\n\nYour Goal: {self.goal}" if self.use_system_prompt else None,
1538+
system_prompt=self._build_system_prompt(tools),
15221539
chat_history=self.chat_history,
15231540
temperature=temperature,
15241541
tools=tools,
@@ -1532,7 +1549,7 @@ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None
15321549
console=self.console,
15331550
agent_name=self.name,
15341551
agent_role=self.role,
1535-
agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in self.tools],
1552+
agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in (tools if tools is not None else self.tools)],
15361553
execute_tool_fn=self.execute_tool_async,
15371554
reasoning_steps=reasoning_steps
15381555
)

test_async_gemini_fix.py

Lines changed: 163 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,163 @@
1+
"""
2+
Test to verify that async agents with Gemini models properly use tools
3+
after the fix for issue #818
4+
"""
5+
import asyncio
6+
import logging
7+
from praisonaiagents import Agent, Task, PraisonAIAgents
8+
9+
# Enable logging to see tool calls
10+
logging.basicConfig(level=logging.INFO)
11+
12+
# Mock search tool
13+
async def mock_search(query: str) -> dict:
14+
"""Mock search tool for testing"""
15+
print(f"[TOOL CALLED] Searching for: {query}")
16+
return {
17+
"query": query,
18+
"results": [
19+
{
20+
"title": f"Result 1 for {query}",
21+
"snippet": f"This is a mock result about {query}",
22+
"url": "https://example.com/1"
23+
},
24+
{
25+
"title": f"Result 2 for {query}",
26+
"snippet": f"Another mock result about {query}",
27+
"url": "https://example.com/2"
28+
}
29+
],
30+
"status": "success"
31+
}
32+
33+
async def test_async_gemini_tools():
34+
"""Test async agents with Gemini models use tools correctly"""
35+
36+
# Create search agent with Gemini model
37+
search_agent = Agent(
38+
name="AsyncSearcher",
39+
role="Research Assistant",
40+
goal="Find information using the search tool",
41+
backstory="You are an expert at finding information online",
42+
tools=[mock_search],
43+
llm={"model": "gemini/gemini-1.5-flash-latest"},
44+
verbose=True
45+
)
46+
47+
# Create analysis agent without tools
48+
analysis_agent = Agent(
49+
name="Analyzer",
50+
role="Data Analyst",
51+
goal="Analyze search results",
52+
backstory="You excel at analyzing and summarizing information",
53+
llm={"model": "gemini/gemini-1.5-flash-latest"},
54+
verbose=True
55+
)
56+
57+
# Create tasks
58+
search_task = Task(
59+
name="search_task",
60+
description="Search for information about 'quantum computing breakthroughs 2024'",
61+
expected_output="Search results with at least 2 relevant findings",
62+
agent=search_agent,
63+
async_execution=True
64+
)
65+
66+
analysis_task = Task(
67+
name="analysis_task",
68+
description="Analyze the search results and provide a summary",
69+
expected_output="A concise summary of the findings",
70+
agent=analysis_agent,
71+
context=[search_task],
72+
async_execution=False
73+
)
74+
75+
# Create workflow
76+
workflow = PraisonAIAgents(
77+
agents=[search_agent, analysis_agent],
78+
tasks=[search_task, analysis_task],
79+
verbose=True
80+
)
81+
82+
# Execute async
83+
print("\n🚀 Starting async agent test with Gemini models...")
84+
result = await workflow.astart()
85+
86+
# Check results
87+
print("\n✅ Test Results:")
88+
print("-" * 50)
89+
90+
# Verify search agent used the tool
91+
search_result = str(result)
92+
if "mock result" in search_result.lower() or "tool called" in search_result.lower():
93+
print("✅ SUCCESS: Search agent properly used the mock_search tool!")
94+
else:
95+
print("❌ FAILURE: Search agent did NOT use the tool (claimed no internet access)")
96+
97+
# Show the actual output
98+
print("\nFinal output:")
99+
print(result)
100+
101+
return result
102+
103+
async def test_multiple_async_agents():
104+
"""Test multiple async agents running in parallel"""
105+
106+
agents = []
107+
tasks = []
108+
109+
# Create 3 search agents
110+
for i in range(3):
111+
agent = Agent(
112+
name=f"AsyncAgent{i}",
113+
role="Researcher",
114+
goal="Search for information",
115+
backstory="Expert researcher",
116+
tools=[mock_search],
117+
llm={"model": "gemini/gemini-1.5-flash-latest"}
118+
)
119+
120+
task = Task(
121+
name=f"task_{i}",
122+
description=f"Search for 'AI advancement #{i+1}'",
123+
expected_output="Search results",
124+
agent=agent,
125+
async_execution=True
126+
)
127+
128+
agents.append(agent)
129+
tasks.append(task)
130+
131+
# Execute all in parallel
132+
workflow = PraisonAIAgents(agents=agents, tasks=tasks)
133+
134+
print("\n🚀 Testing multiple async agents in parallel...")
135+
results = await workflow.astart()
136+
137+
# Verify all agents used tools
138+
success_count = 0
139+
for i, task in enumerate(tasks):
140+
if "mock result" in str(results).lower():
141+
success_count += 1
142+
143+
print(f"\n{success_count}/{len(tasks)} agents successfully used tools")
144+
145+
return results
146+
147+
async def main():
148+
"""Run all async tests"""
149+
try:
150+
# Test 1: Single async agent
151+
await test_async_gemini_tools()
152+
153+
# Test 2: Multiple async agents in parallel
154+
await test_multiple_async_agents()
155+
156+
print("\n🎉 All async tests completed!")
157+
158+
except Exception as e:
159+
print(f"\n❌ Error during testing: {e}")
160+
raise
161+
162+
if __name__ == "__main__":
163+
asyncio.run(main())

test_tool_fix_documentation.md

Lines changed: 31 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -4,16 +4,27 @@
44
Agents using Gemini models (`gemini/gemini-1.5-flash-8b`) were not calling provided tools, instead responding with "I do not have access to the internet" when tasked with searching.
55

66
## Root Cause
7-
The Gemini model through LiteLLM was not being properly instructed to use the available tools. The system prompt didn't mention the tools, and the tool_choice parameter wasn't being set.
7+
The Gemini model through LiteLLM was not being properly instructed to use the available tools. The system prompt didn't mention the tools, and the tool_choice parameter wasn't being set. Additionally, async agents were not passing the correct tools to the system prompt generation.
88

99
## Fix Applied (Updated)
1010

1111
### 1. Enhanced System Prompt (agent.py) - IMPROVED
12-
When tools are available, the agent's system prompt now explicitly mentions them with better error handling:
12+
A new `_build_system_prompt` method was created to centralize system prompt generation with tool information. This ensures both sync and async agents get the same enhanced prompt:
1313

1414
```python
15-
# In _build_messages method with enhanced error handling and MCP tool support
16-
if self.tools:
15+
# New _build_system_prompt method with enhanced error handling and MCP tool support
16+
def _build_system_prompt(self, tools=None):
17+
"""Build the system prompt with tool information."""
18+
if not self.use_system_prompt:
19+
return None
20+
21+
system_prompt = f"""{self.backstory}\n
22+
Your Role: {self.role}\n
23+
Your Goal: {self.goal}"""
24+
25+
# Use provided tools or fall back to self.tools
26+
tools_to_use = tools if tools is not None else self.tools
27+
if tools_to_use:
1728
tool_names = []
1829
for tool in tools_to_use:
1930
try:
@@ -59,6 +70,20 @@ if 'tools' in params and params['tools'] and 'tool_choice' not in params:
5970
params['tool_choice'] = 'auto'
6071
```
6172

73+
### 3. Async Agent Fix (agent.py) - NEW
74+
Fixed async agents to correctly pass tools for system prompt generation:
75+
76+
```python
77+
# In sync chat method:
78+
agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in (tools if tools is not None else self.tools)]
79+
80+
# Fixed in async achat method (was always using self.tools):
81+
agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in (tools if tools is not None else self.tools)]
82+
83+
# Both sync and async now use enhanced system prompt:
84+
system_prompt=self._build_system_prompt(tools)
85+
```
86+
6287
## Testing the Fix
6388

6489
To test the fix, use the following code:
@@ -105,6 +130,8 @@ asyncio.run(test())
105130
3. **Better Model Detection**: More comprehensive Gemini model detection including variants like 'google/gemini'
106131
4. **Function Calling Support Check**: Uses litellm's `supports_function_calling` to verify model capabilities
107132
5. **Type Safety**: Added isinstance checks to prevent TypeErrors when accessing nested dictionaries
133+
6. **Async Agent Fix**: Fixed async agents to use the correct tools parameter instead of always using self.tools
134+
7. **Centralized System Prompt**: Created `_build_system_prompt` method to ensure consistency between sync and async paths
108135

109136
## Backward Compatibility
110137
- The fix only adds to existing functionality without modifying core behavior

0 commit comments

Comments (0)