-
Notifications
You must be signed in to change notification settings - Fork 1.6k
Expand file tree
/
Copy pathollama_agent_reasoning.py
More file actions
44 lines (32 loc) · 1.3 KB
/
ollama_agent_reasoning.py
File metadata and controls
44 lines (32 loc) · 1.3 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
# Copyright (c) Microsoft. All rights reserved.
import asyncio
from agent_framework import Agent
from agent_framework.ollama import OllamaChatClient
from dotenv import load_dotenv
# Load environment variables from .env file
load_dotenv()
"""
Ollama Agent Reasoning Example
This sample demonstrates implementing a Ollama agent with reasoning.
Ensure to install Ollama and have a model running locally before running the sample
Not all Models support reasoning, to test reasoning try qwen3:8b
Set the model to use via the OLLAMA_MODEL environment variable or modify the code below.
https://ollama.com/
"""
async def main() -> None:
    """Run one reasoning-enabled query against a locally running Ollama model.

    Requires Ollama to be installed and serving a reasoning-capable model
    (e.g. qwen3:8b); the model is selected via the OLLAMA_MODEL environment
    variable read by OllamaChatClient.
    """
    print("=== Response Reasoning Example ===")

    agent = Agent(
        client=OllamaChatClient(),
        name="TimeAgent",
        instructions="You are a helpful agent answer in one sentence.",
        default_options={"think": True},  # Enable reasoning at the agent level
    )

    query = "Hey what is 3+4? Can you explain how you got to that answer?"
    print(f"User: {query}")

    # Reasoning was enabled via default_options above, so a plain run() call
    # already produces reasoning content; no per-request option is needed here.
    result = await agent.run(query)

    # The model's reasoning trace arrives as "text_reasoning" content parts on
    # the final message; concatenate them (some parts may carry no text).
    reasoning = "".join((c.text or "") for c in result.messages[-1].contents if c.type == "text_reasoning")
    print(f"Reasoning: {reasoning}")
    print(f"Answer: {result}\n")


if __name__ == "__main__":
    asyncio.run(main())