Skip to content

Commit c87604c

Browse files
authored
examples: add gpt-oss browser example (#558)
1 parent 53ff3cd commit c87604c

File tree

3 files changed

+377
-2
lines changed

3 files changed

+377
-2
lines changed

examples/README.md

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -28,8 +28,10 @@ See [ollama/docs/api.md](https://github.com/ollama/ollama/blob/main/docs/api.md)
2828
- [multi-tool.py](multi-tool.py) - Using multiple tools, with thinking enabled
2929

3030
#### gpt-oss
31-
- [gpt-oss-tools.py](gpt-oss-tools.py) - Using tools with gpt-oss
32-
- [gpt-oss-tools-stream.py](gpt-oss-tools-stream.py) - Using tools with gpt-oss, with streaming enabled
31+
- [gpt-oss-tools.py](gpt-oss-tools.py)
32+
- [gpt-oss-tools-stream.py](gpt-oss-tools-stream.py)
33+
- [gpt-oss-tools-browser.py](gpt-oss-tools-browser.py) - Using browser research tools with gpt-oss
34+
- [gpt-oss-tools-browser-stream.py](gpt-oss-tools-browser-stream.py) - Using browser research tools with gpt-oss, with streaming enabled
3335

3436

3537
### Multimodal with Images - Chat with a multimodal (image chat) model
Lines changed: 198 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,198 @@
1+
# /// script
2+
# requires-python = ">=3.11"
3+
# dependencies = [
4+
# "gpt-oss",
5+
# "ollama",
6+
# "rich",
7+
# ]
8+
# ///
9+
10+
import asyncio
11+
import json
12+
from typing import Iterator, Optional
13+
14+
from gpt_oss.tools.simple_browser import ExaBackend, SimpleBrowserTool
15+
from openai_harmony import Author, Role, TextContent
16+
from openai_harmony import Message as HarmonyMessage
17+
from rich import print
18+
19+
from ollama import Client
20+
from ollama._types import ChatResponse
21+
22+
# Exa-backed web backend for the gpt-oss simple browser tool.
# NOTE(review): presumably requires an Exa API key in the environment at runtime — confirm.
_backend = ExaBackend(source='web')
# Single shared browser tool instance used by all the tool wrappers below
# (it keeps page state between search/open/find calls).
_browser_tool = SimpleBrowserTool(backend=_backend)
24+
25+
26+
def heading(text):
    """Print *text* followed by an '=' underline slightly longer than the text."""
    underline = '=' * (len(text) + 3)
    print(text)
    print(underline)
29+
30+
31+
async def _browser_search_async(query: str, topn: int = 10, source: str | None = None) -> str:
    """Run a browser.search call through the Harmony browser tool and return its text output.

    NOTE(review): `source` is accepted for interface parity with the sync wrapper
    but is not forwarded in the payload — confirm this is intended.
    """
    # Translate the Ollama-style call into a Harmony user message addressed to browser.search.
    request = HarmonyMessage(
        author=Author(role=Role.USER),
        content=[TextContent(text=json.dumps({'query': query, 'topn': topn}))],
        recipient='browser.search',
    )

    # Collect every text fragment the tool streams back.
    fragments: list[str] = []
    async for reply in _browser_tool._process(request):
        for part in reply.content or []:
            if isinstance(part, TextContent):
                fragments.append(part.text)

    combined = ''.join(fragments)
    return combined if combined else f'No results for query: {query}'
46+
47+
48+
async def _browser_open_async(id: int | str = -1, cursor: int = -1, loc: int = -1, num_lines: int = -1, *, view_source: bool = False, source: str | None = None) -> str:
    """Invoke the Harmony browser.open tool and return its concatenated text output."""
    request_args = {'id': id, 'cursor': cursor, 'loc': loc, 'num_lines': num_lines, 'view_source': view_source, 'source': source}

    # Wrap the arguments in a Harmony user message addressed to browser.open.
    request = HarmonyMessage(
        author=Author(role=Role.USER),
        content=[TextContent(text=json.dumps(request_args))],
        recipient='browser.open',
    )

    # Gather the text pieces the tool yields, then join them once.
    fragments: list[str] = []
    async for reply in _browser_tool._process(request):
        for part in reply.content or []:
            if isinstance(part, TextContent):
                fragments.append(part.text)

    combined = ''.join(fragments)
    return combined if combined else f'Could not open: {id}'
64+
65+
66+
async def _browser_find_async(pattern: str, cursor: int = -1) -> str:
    """Invoke the Harmony browser.find tool and return its concatenated text output."""
    request_args = {'pattern': pattern, 'cursor': cursor}

    # Wrap the arguments in a Harmony user message addressed to browser.find.
    request = HarmonyMessage(
        author=Author(role=Role.USER),
        content=[TextContent(text=json.dumps(request_args))],
        recipient='browser.find',
    )

    # Gather the text pieces the tool yields, then join them once.
    fragments: list[str] = []
    async for reply in _browser_tool._process(request):
        for part in reply.content or []:
            if isinstance(part, TextContent):
                fragments.append(part.text)

    combined = ''.join(fragments)
    return combined if combined else f'Pattern not found: {pattern}'
82+
83+
84+
def browser_search(query: str, topn: int = 10, source: Optional[str] = None) -> str:
    """Blocking wrapper: drive the async browser.search helper to completion."""
    coro = _browser_search_async(query=query, topn=topn, source=source)
    return asyncio.run(coro)
86+
87+
88+
def browser_open(id: int | str = -1, cursor: int = -1, loc: int = -1, num_lines: int = -1, *, view_source: bool = False, source: Optional[str] = None) -> str:
    """Blocking wrapper: drive the async browser.open helper to completion."""
    coro = _browser_open_async(id=id, cursor=cursor, loc=loc, num_lines=num_lines, view_source=view_source, source=source)
    return asyncio.run(coro)
90+
91+
92+
def browser_find(pattern: str, cursor: int = -1) -> str:
    """Blocking wrapper: drive the async browser.find helper to completion."""
    coro = _browser_find_async(pattern=pattern, cursor=cursor)
    return asyncio.run(coro)
94+
95+
96+
# Schema definitions for each browser tool.
# Only the tool names are advertised: gpt-oss knows the built-in browser tool
# surface, so no parameter schemas are required.
def _tool_schema(name):
    # One minimal function-call schema entry for the given tool name.
    return {'type': 'function', 'function': {'name': name}}


browser_search_schema = _tool_schema('browser.search')

browser_open_schema = _tool_schema('browser.open')

browser_find_schema = _tool_schema('browser.find')
117+
118+
# Dispatch table: tool name emitted by the model -> local synchronous wrapper
# that executes it.
available_tools = {
    'browser.search': browser_search,
    'browser.open': browser_open,
    'browser.find': browser_find,
}
123+
124+
125+
# Model and prompt for this example run.
model = 'gpt-oss:20b'
print('Model: ', model, '\n')

prompt = 'What is Ollama?'
print('You: ', prompt, '\n')
# Running chat history; assistant/tool messages are appended by the loop below.
messages = [{'role': 'user', 'content': prompt}]

# Talks to a locally running Ollama server.
client = Client()

# gpt-oss can call tools while "thinking"
# a loop is needed to call the tools and get the results
# `final` is flipped to False after the first non-thinking content chunk so the
# "Final result" heading is printed exactly once.
final = True
137+
# Streaming agent loop: chat with the model, execute any requested browser
# tools, feed the results back, and repeat until no more tools are requested.
while True:
    response_stream: Iterator[ChatResponse] = client.chat(
        model=model,
        messages=messages,
        tools=[browser_search_schema, browser_open_schema, browser_find_schema],
        options={'num_ctx': 8192},  # 8192 is the recommended lower limit for the context window
        stream=True,
    )

    # Per-turn accumulators rebuilt for every model response.
    # NOTE(review): `content` is initialized but never accumulated or appended
    # to the chat history — looks unused; confirm whether final content should
    # also be stored in `messages`.
    tool_calls = []
    thinking = ''
    content = ''

    for chunk in response_stream:
        if chunk.message.tool_calls:
            tool_calls.extend(chunk.message.tool_calls)

        if chunk.message.content:
            # Heading is printed only when this content chunk carries no
            # `thinking` field at all (the expression is False for both '' and
            # non-empty thinking), and only the first time (`final` guard).
            if not (chunk.message.thinking or chunk.message.thinking == '') and final:
                heading('\n\nFinal result: ')
                final = False
            print(chunk.message.content, end='', flush=True)

        if chunk.message.thinking:
            thinking += chunk.message.thinking
            print(chunk.message.thinking, end='', flush=True)

    # NOTE(review): only the thinking text (with any tool calls) is recorded as
    # the assistant turn; if the model emits tool calls without thinking, the
    # assistant message is never appended — confirm intended.
    if thinking != '':
        messages.append({'role': 'assistant', 'content': thinking, 'tool_calls': tool_calls})

    print()

    if tool_calls:
        # Execute each requested tool and append its result to the history.
        for tool_call in tool_calls:
            tool_name = tool_call.function.name
            args = tool_call.function.arguments or {}
            function_to_call = available_tools.get(tool_name)

            if function_to_call:
                heading(f'\nCalling tool: {tool_name}')
                if args:
                    print(f'Arguments: {args}')

                try:
                    # Only the first 200 characters are echoed to the console;
                    # the full result still goes into the chat history.
                    result = function_to_call(**args)
                    print(f'Tool result: {result[:200]}')
                    # NOTE(review): the non-streaming example prints this marker
                    # with plain print(); using heading() here adds an '='
                    # underline — confirm the inconsistency is intentional.
                    if len(result) > 200:
                        heading('... [truncated]')
                    print()

                    result_message = {'role': 'tool', 'content': result, 'tool_name': tool_name}
                    messages.append(result_message)

                except Exception as e:
                    # Report tool failures back to the model instead of crashing.
                    err = f'Error from {tool_name}: {e}'
                    print(err)
                    messages.append({'role': 'tool', 'content': err, 'tool_name': tool_name})
            else:
                print(f'Tool {tool_name} not found')
    else:
        # no more tool calls, we can stop the loop
        break

examples/gpt-oss-tools-browser.py

Lines changed: 175 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,175 @@
1+
# /// script
2+
# requires-python = ">=3.11"
3+
# dependencies = [
4+
# "gpt-oss",
5+
# "ollama",
6+
# "rich",
7+
# ]
8+
# ///
9+
10+
import asyncio
11+
import json
12+
from typing import Optional
13+
14+
from gpt_oss.tools.simple_browser import ExaBackend, SimpleBrowserTool
15+
from openai_harmony import Author, Role, TextContent
16+
from openai_harmony import Message as HarmonyMessage
17+
18+
from ollama import Client
19+
20+
# Exa-backed web backend for the gpt-oss simple browser tool.
# NOTE(review): presumably requires an Exa API key in the environment at runtime — confirm.
_backend = ExaBackend(source='web')
# Single shared browser tool instance used by all the tool wrappers below
# (it keeps page state between search/open/find calls).
_browser_tool = SimpleBrowserTool(backend=_backend)
22+
23+
24+
def heading(text):
    """Print *text* followed by an '=' underline slightly longer than the text."""
    underline = '=' * (len(text) + 3)
    print(text)
    print(underline)
27+
28+
29+
async def _browser_search_async(query: str, topn: int = 10, source: str | None = None) -> str:
    """Run a browser.search call through the Harmony browser tool and return its text output.

    NOTE(review): `source` is accepted for interface parity with the sync wrapper
    but is not forwarded in the payload — confirm this is intended.
    """
    # Translate the Ollama-style call into a Harmony user message addressed to browser.search.
    request = HarmonyMessage(
        author=Author(role=Role.USER),
        content=[TextContent(text=json.dumps({'query': query, 'topn': topn}))],
        recipient='browser.search',
    )

    # Collect every text fragment the tool streams back.
    fragments: list[str] = []
    async for reply in _browser_tool._process(request):
        for part in reply.content or []:
            if isinstance(part, TextContent):
                fragments.append(part.text)

    combined = ''.join(fragments)
    return combined if combined else f'No results for query: {query}'
44+
45+
46+
async def _browser_open_async(id: int | str = -1, cursor: int = -1, loc: int = -1, num_lines: int = -1, *, view_source: bool = False, source: str | None = None) -> str:
    """Invoke the Harmony browser.open tool and return its concatenated text output."""
    request_args = {'id': id, 'cursor': cursor, 'loc': loc, 'num_lines': num_lines, 'view_source': view_source, 'source': source}

    # Wrap the arguments in a Harmony user message addressed to browser.open.
    request = HarmonyMessage(
        author=Author(role=Role.USER),
        content=[TextContent(text=json.dumps(request_args))],
        recipient='browser.open',
    )

    # Gather the text pieces the tool yields, then join them once.
    fragments: list[str] = []
    async for reply in _browser_tool._process(request):
        for part in reply.content or []:
            if isinstance(part, TextContent):
                fragments.append(part.text)

    combined = ''.join(fragments)
    return combined if combined else f'Could not open: {id}'
62+
63+
64+
async def _browser_find_async(pattern: str, cursor: int = -1) -> str:
    """Invoke the Harmony browser.find tool and return its concatenated text output."""
    request_args = {'pattern': pattern, 'cursor': cursor}

    # Wrap the arguments in a Harmony user message addressed to browser.find.
    request = HarmonyMessage(
        author=Author(role=Role.USER),
        content=[TextContent(text=json.dumps(request_args))],
        recipient='browser.find',
    )

    # Gather the text pieces the tool yields, then join them once.
    fragments: list[str] = []
    async for reply in _browser_tool._process(request):
        for part in reply.content or []:
            if isinstance(part, TextContent):
                fragments.append(part.text)

    combined = ''.join(fragments)
    return combined if combined else f'Pattern not found: {pattern}'
80+
81+
82+
def browser_search(query: str, topn: int = 10, source: Optional[str] = None) -> str:
    """Blocking wrapper: drive the async browser.search helper to completion."""
    coro = _browser_search_async(query=query, topn=topn, source=source)
    return asyncio.run(coro)
84+
85+
86+
def browser_open(id: int | str = -1, cursor: int = -1, loc: int = -1, num_lines: int = -1, *, view_source: bool = False, source: Optional[str] = None) -> str:
    """Blocking wrapper: drive the async browser.open helper to completion."""
    coro = _browser_open_async(id=id, cursor=cursor, loc=loc, num_lines=num_lines, view_source=view_source, source=source)
    return asyncio.run(coro)
88+
89+
90+
def browser_find(pattern: str, cursor: int = -1) -> str:
    """Blocking wrapper: drive the async browser.find helper to completion."""
    coro = _browser_find_async(pattern=pattern, cursor=cursor)
    return asyncio.run(coro)
92+
93+
94+
# Schema definitions for each browser tool.
# Only the tool names are advertised: gpt-oss knows the built-in browser tool
# surface, so no parameter schemas are required.
def _tool_schema(name):
    # One minimal function-call schema entry for the given tool name.
    return {'type': 'function', 'function': {'name': name}}


browser_search_schema = _tool_schema('browser.search')

browser_open_schema = _tool_schema('browser.open')

browser_find_schema = _tool_schema('browser.find')
115+
116+
# Dispatch table: tool name emitted by the model -> local synchronous wrapper
# that executes it.
available_tools = {
    'browser.search': browser_search,
    'browser.open': browser_open,
    'browser.find': browser_find,
}
121+
122+
123+
# Model and prompt for this example run.
model = 'gpt-oss:20b'
print('Model: ', model, '\n')

prompt = 'What is Ollama?'
print('You: ', prompt, '\n')
# Running chat history; assistant and tool messages are appended by the loop below.
messages = [{'role': 'user', 'content': prompt}]

# Talks to a locally running Ollama server.
client = Client()
131+
# Agent loop: chat with the model, execute any requested browser tools, feed
# the results back into the history, and repeat until no tools are requested.
while True:
    response = client.chat(
        model=model,
        messages=messages,
        tools=[browser_search_schema, browser_open_schema, browser_find_schema],
        options={'num_ctx': 8192},  # 8192 is the recommended lower limit for the context window
    )

    # Show the model's reasoning trace, if present.
    if hasattr(response.message, 'thinking') and response.message.thinking:
        heading('Thinking')
        print(response.message.thinking.strip() + '\n')

    # Show the model's visible answer, if present.
    if hasattr(response.message, 'content') and response.message.content:
        heading('Assistant')
        print(response.message.content.strip() + '\n')

    # add message to chat history
    messages.append(response.message)

    if response.message.tool_calls:
        # Execute each requested tool and append its result to the history.
        for tool_call in response.message.tool_calls:
            tool_name = tool_call.function.name
            args = tool_call.function.arguments or {}
            function_to_call = available_tools.get(tool_name)
            # Guard clause: skip tools the model hallucinated.
            if not function_to_call:
                print(f'Unknown tool: {tool_name}')
                continue

            try:
                # Only the first 200 characters are echoed to the console;
                # the full result still goes into the chat history.
                result = function_to_call(**args)
                heading(f'Tool: {tool_name}')
                if args:
                    print(f'Arguments: {args}')
                print(result[:200])
                if len(result) > 200:
                    print('... [truncated]')
                print()
                messages.append({'role': 'tool', 'content': result, 'tool_name': tool_name})
            except Exception as e:
                # Report tool failures back to the model instead of crashing.
                err = f'Error from {tool_name}: {e}'
                print(err)
                messages.append({'role': 'tool', 'content': err, 'tool_name': tool_name})
    else:
        # break on no more tool calls
        break

0 commit comments

Comments
 (0)