Commit db7b90e

feat: add weather server and Ollama LLM integration with test files
1 parent db28e0a

7 files changed: 2589 additions & 2130 deletions

src/praisonai-agents/praisonaiagents/llm/llm.py

Lines changed: 101 additions & 19 deletions

```diff
@@ -549,6 +549,7 @@ def get_response(
             })

             should_continue = False
+            tool_results = []  # Store all tool results
             for tool_call in tool_calls:
                 # Handle both object and dict access patterns
                 if isinstance(tool_call, dict):
@@ -569,6 +570,7 @@ def get_response(
                 logging.debug(f"[TOOL_EXEC_DEBUG] About to execute tool {function_name} with args: {arguments}")
                 tool_result = execute_tool_fn(function_name, arguments)
                 logging.debug(f"[TOOL_EXEC_DEBUG] Tool execution result: {tool_result}")
+                tool_results.append(tool_result)  # Store the result

                 if verbose:
                     display_message = f"Agent {agent_name} called function '{function_name}' with arguments: {arguments}\n"
@@ -601,7 +603,8 @@ def get_response(
             # If we reach here, no more tool calls needed - get final response
             # Make one more call to get the final summary response
             # Special handling for Ollama models that don't automatically process tool results
-            if self.model and self.model.startswith("ollama/") and tool_result:
+            ollama_handled = False
+            if self.model and self.model.startswith("ollama/") and tool_results:
                 # For Ollama models, we need to explicitly ask the model to process the tool results
                 # First, check if the response is just a JSON tool call
                 try:
@@ -614,13 +617,30 @@ def get_response(
                     # Create a prompt that asks the model to process the tool results based on original context
                     # Extract the original user query from messages
                     original_query = ""
-                    for msg in messages:
+                    for msg in reversed(messages):  # Look from the end to find the most recent user message
                         if msg.get("role") == "user":
-                            original_query = msg.get("content", "")
-                            break
+                            content = msg.get("content", "")
+                            # Handle list content (multimodal)
+                            if isinstance(content, list):
+                                for item in content:
+                                    if isinstance(item, dict) and item.get("type") == "text":
+                                        original_query = item.get("text", "")
+                                        break
+                            else:
+                                original_query = content
+                            if original_query:
+                                break
+
+                    # Create a shorter follow-up prompt with all tool results
+                    # If there's only one result, use it directly; otherwise combine them
+                    if len(tool_results) == 1:
+                        results_text = json.dumps(tool_results[0], indent=2)
+                    else:
+                        results_text = json.dumps(tool_results, indent=2)

-                    # Create a shorter follow-up prompt
-                    follow_up_prompt = f"Results:\n{json.dumps(tool_result, indent=2)}\nProvide Answer to this Original Question based on the above results: '{original_query}'"
+                    follow_up_prompt = f"Results:\n{results_text}\nProvide Answer to this Original Question based on the above results: '{original_query}'"
+                    logging.debug(f"[OLLAMA_DEBUG] Original query extracted: {original_query}")
+                    logging.debug(f"[OLLAMA_DEBUG] Follow-up prompt: {follow_up_prompt[:200]}...")

                     # Make a follow-up call to process the results
                     follow_up_messages = [
@@ -653,12 +673,33 @@ def get_response(
                     ):
                         if chunk and chunk.choices and chunk.choices[0].delta.content:
                             response_text += chunk.choices[0].delta.content
+
+                    # Set flag to indicate Ollama was handled
+                    ollama_handled = True
+                    final_response_text = response_text.strip()
+                    logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
+
+                    # Display the response if we got one
+                    if final_response_text and verbose:
+                        display_interaction(
+                            original_prompt,
+                            final_response_text,
+                            markdown=markdown,
+                            generation_time=time.time() - start_time,
+                            console=console
+                        )
+
+                    # Return the final response after processing Ollama's follow-up
+                    if final_response_text:
+                        return final_response_text
+                    else:
+                        logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned empty response")
                 except (json.JSONDecodeError, KeyError):
                     # Not a JSON response or not a tool call format, continue normally
                     pass

-            # If reasoning_steps is True, do a single non-streaming call
-            elif reasoning_steps:
+            # If reasoning_steps is True and we haven't handled Ollama already, do a single non-streaming call
+            if reasoning_steps and not ollama_handled:
                 resp = litellm.completion(
                     **self._build_completion_params(
                         messages=messages,
@@ -688,8 +729,8 @@ def get_response(
                         console=console
                     )

-            # Otherwise do the existing streaming approach
-            else:
+            # Otherwise do the existing streaming approach if not already handled
+            elif not ollama_handled:
                 # Get response after tool calls with streaming
                 if verbose:
                     with Live(display_generating("", current_time), console=console, refresh_per_second=4) as live:
@@ -1225,6 +1266,7 @@ async def get_response_async(
                 "tool_calls": serializable_tool_calls
             })

+            tool_results = []  # Store all tool results
             for tool_call in tool_calls:
                 # Handle both object and dict access patterns
                 if isinstance(tool_call, dict):
@@ -1243,6 +1285,7 @@ async def get_response_async(
                     tool_call_id = f"tool_{id(tool_call)}"

                 tool_result = await execute_tool_fn(function_name, arguments)
+                tool_results.append(tool_result)  # Store the result

                 if verbose:
                     display_message = f"Agent {agent_name} called function '{function_name}' with arguments: {arguments}\n"
@@ -1261,7 +1304,8 @@ async def get_response_async(
            response_text = ""

            # Special handling for Ollama models that don't automatically process tool results
-            if self._is_ollama_provider() and tool_result:
+            ollama_handled = False
+            if self._is_ollama_provider() and tool_results:
                 # For Ollama models, we need to explicitly ask the model to process the tool results
                 # First, check if the response is just a JSON tool call
                 try:
@@ -1274,13 +1318,30 @@ async def get_response_async(
                     # Create a prompt that asks the model to process the tool results based on original context
                     # Extract the original user query from messages
                     original_query = ""
-                    for msg in messages:
+                    for msg in reversed(messages):  # Look from the end to find the most recent user message
                         if msg.get("role") == "user":
-                            original_query = msg.get("content", "")
-                            break
+                            content = msg.get("content", "")
+                            # Handle list content (multimodal)
+                            if isinstance(content, list):
+                                for item in content:
+                                    if isinstance(item, dict) and item.get("type") == "text":
+                                        original_query = item.get("text", "")
+                                        break
+                            else:
+                                original_query = content
+                            if original_query:
+                                break
+
+                    # Create a shorter follow-up prompt with all tool results
+                    # If there's only one result, use it directly; otherwise combine them
+                    if len(tool_results) == 1:
+                        results_text = json.dumps(tool_results[0], indent=2)
+                    else:
+                        results_text = json.dumps(tool_results, indent=2)

-                    # Create a shorter follow-up prompt
-                    follow_up_prompt = f"Results:\n{json.dumps(tool_result, indent=2)}\nProvide Answer to this Original Question based on the above results: '{original_query}'"
+                    follow_up_prompt = f"Results:\n{results_text}\nProvide Answer to this Original Question based on the above results: '{original_query}'"
+                    logging.debug(f"[OLLAMA_DEBUG] Original query extracted: {original_query}")
+                    logging.debug(f"[OLLAMA_DEBUG] Follow-up prompt: {follow_up_prompt[:200]}...")

                     # Make a follow-up call to process the results
                     follow_up_messages = [
@@ -1313,12 +1374,33 @@ async def get_response_async(
                     ):
                         if chunk and chunk.choices and chunk.choices[0].delta.content:
                             response_text += chunk.choices[0].delta.content
+
+                    # Set flag to indicate Ollama was handled
+                    ollama_handled = True
+                    final_response_text = response_text.strip()
+                    logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
+
+                    # Display the response if we got one
+                    if final_response_text and verbose:
+                        display_interaction(
+                            original_prompt,
+                            final_response_text,
+                            markdown=markdown,
+                            generation_time=time.time() - start_time,
+                            console=console
+                        )
+
+                    # Return the final response after processing Ollama's follow-up
+                    if final_response_text:
+                        return final_response_text
+                    else:
+                        logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned empty response")
                 except (json.JSONDecodeError, KeyError):
                     # Not a JSON response or not a tool call format, continue normally
                     pass

             # If no special handling was needed or if it's not an Ollama model
-            elif reasoning_steps:
+            if reasoning_steps and not ollama_handled:
                 # Non-streaming call to capture reasoning
                 resp = await litellm.acompletion(
                     **self._build_completion_params(
@@ -1348,8 +1430,8 @@ async def get_response_async(
                         generation_time=time.time() - start_time,
                         console=console
                     )
-            else:
-                # Get response after tool calls with streaming
+            elif not ollama_handled:
+                # Get response after tool calls with streaming if not already handled
                 if verbose:
                     async for chunk in await litellm.acompletion(
                         **self._build_completion_params(
```
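
Note: the core of the llm.py change is the same in both the sync and async paths: collect every tool result, find the most recent user message (including multimodal list content), and make one extra call asking the Ollama model to answer from the combined results. Below is a minimal standalone sketch of that follow-up-prompt construction, lifted from the hunks above for readability; the helper name and sample data are illustrative and not part of the commit.

```python
import json

def build_ollama_follow_up_prompt(messages, tool_results):
    """Mirror the logic added above: extract the most recent user query
    (handling multimodal list content) and combine it with all tool
    results into one follow-up prompt for the Ollama model."""
    original_query = ""
    for msg in reversed(messages):  # most recent user message first
        if msg.get("role") == "user":
            content = msg.get("content", "")
            if isinstance(content, list):  # multimodal content: pick the text part
                for item in content:
                    if isinstance(item, dict) and item.get("type") == "text":
                        original_query = item.get("text", "")
                        break
            else:
                original_query = content
            if original_query:
                break

    # A single result is passed through directly; multiple results are combined.
    if len(tool_results) == 1:
        results_text = json.dumps(tool_results[0], indent=2)
    else:
        results_text = json.dumps(tool_results, indent=2)

    return (
        f"Results:\n{results_text}\n"
        f"Provide Answer to this Original Question based on the above results: '{original_query}'"
    )

# Example (illustrative data only):
messages = [{"role": "user", "content": "What's the weather in London?"}]
tool_results = [{"city": "London", "temp_c": 18, "condition": "Cloudy"}]
print(build_ollama_follow_up_prompt(messages, tool_results))
```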

src/praisonai-agents/pyproject.toml

Lines changed: 1 addition & 1 deletion

```diff
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

 [project]
 name = "praisonaiagents"
-version = "0.0.106"
+version = "0.0.107"
 description = "Praison AI agents for completing complex tasks with Self Reflection Agents"
 requires-python = ">=3.10"
 authors = [
```

src/praisonai-agents/test.py

Lines changed: 31 additions & 25 deletions

```diff
@@ -1,33 +1,39 @@
-from praisonaiagents import Agent, Task, PraisonAIAgents
 import os
-from dotenv import load_dotenv

-load_dotenv()
+# Set OpenAI API configuration BEFORE importing praisonaiagents
+# os.environ["OPENAI_API_BASE"] = "http://localhost:1234/v1"
+# os.environ["OPENAI_API_KEY"] = "not-needed"

-llm_config = {
-    "model": "openai/gpt-4o-mini",
-    "api_key": os.getenv('OPENAI_API_KEY'),
-    "temperature": 0.7,
-    "max_tokens": 2000
-}
+# Now import after setting the environment
+from praisonaiagents import Agent, MCP

-blog_agent = Agent(
-    role="Blog Writer",
-    goal="Write a blog post about AI",
-    backstory="Expert at writing blog posts",
-    llm=llm_config,
-)
+# Paths to python and the weather server script
+python_path = os.getenv("PYTHON_PATH", "python")
+server_path = os.getenv("WEATHER_SERVER_PATH", "weather_server.py")

-blog_task = Task(
-    description="Write a blog post about AI trends in 1 paragraph",
-    expected_output="Well-written blog post about AI trends",
-    agent=blog_agent
-)
+# Create the agent with Ollama
+weather_agent = Agent(
+    name="Weather Assistant",
+    role="Weather assistant",
+    goal="Provide accurate and timely weather information for various cities",
+    instructions="""
+    You are a helpful weather assistant that can provide current weather information,
+    forecasts, and weather comparisons for different cities. Use the available weather tools to answer
+    user questions about weather conditions. You can:
+
+    - Get current weather for cities
+    - Get hourly forecasts
+    - Compare weather between two cities
+    - Use both mock data and real API data (when API key is provided)
+    - Set use_real_api True to use real API data all the time

-agents = PraisonAIAgents(
-    agents=[blog_agent],
-    tasks=[blog_task],
-    memory=False
+    Always use the appropriate weather tools when users ask about weather information.
+    """,
+    llm="ollama/llama3.2",  # Using Ollama with llama3.2
+    tools=MCP(f"{python_path} {server_path}"),
+    verbose=True
 )

-result = agents.start()
+# Optional: run a sample task
+response = weather_agent.start("What's the weather in London?")
+print(response)
```
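
The weather_server.py that test.py launches is among the 7 changed files but its contents are not shown in this view. For orientation only, here is a hypothetical minimal version, assuming the official MCP Python SDK's FastMCP helper and mock data; the tool names, fields, and values are assumptions, not the committed implementation.

```python
# Hypothetical minimal weather_server.py (the real file is not shown in this diff view).
# Assumes the official `mcp` Python SDK; serves mock data only.
from mcp.server.fastmcp import FastMCP

mcp = FastMCP("weather")

# Mock data standing in for a real weather API response.
MOCK_WEATHER = {
    "London": {"temp_c": 18, "condition": "Cloudy"},
    "Paris": {"temp_c": 22, "condition": "Sunny"},
}

def _lookup(city: str) -> dict:
    """Return mock weather for a city, with a generic fallback."""
    return MOCK_WEATHER.get(city, {"temp_c": 20, "condition": "Unknown"})

@mcp.tool()
def get_current_weather(city: str) -> dict:
    """Get current weather for a city (mock data)."""
    return _lookup(city)

@mcp.tool()
def compare_weather(city_a: str, city_b: str) -> dict:
    """Compare current weather between two cities (mock data)."""
    return {city_a: _lookup(city_a), city_b: _lookup(city_b)}

if __name__ == "__main__":
    # test.py builds the command as f"{python_path} {server_path}", so the agent
    # spawns this script as a subprocess and talks to it over stdio.
    mcp.run(transport="stdio")
```

Because the MCP transport is stdio, the tools registered with @mcp.tool() are what the agent exposes to the Ollama model, and their results are what the new tool_results handling in llm.py feeds back in the follow-up prompt.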
