# /// script
# requires-python = ">=3.11"
# dependencies = [
#     "gpt-oss",
#     "ollama",
#     "rich",
# ]
# ///
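# The inline script metadata above (PEP 723) lets tools like `uv run` install
# the dependencies for this example automatically.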
import asyncio
import json
from typing import Iterator, Optional

from gpt_oss.tools.simple_browser import ExaBackend, SimpleBrowserTool
from openai_harmony import Author, Role, TextContent
from openai_harmony import Message as HarmonyMessage
from rich import print

from ollama import ChatResponse, Client
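# gpt-oss ships a simple Exa-backed browser; one shared instance serves all
# three tools below. ExaBackend expects an EXA_API_KEY environment variable.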
_backend = ExaBackend(source='web')
_browser_tool = SimpleBrowserTool(backend=_backend)


def heading(text):
    # print a section title with an '=' underline
    print(text)
    print('=' * (len(text) + 3))


async def _browser_search_async(query: str, topn: int = 10, source: str | None = None) -> str:
    # map the tool call into a Harmony-format message addressed to browser.search
    # (source is accepted for signature parity with the browser tool but is unused here)
    harmony_message = HarmonyMessage(
        author=Author(role=Role.USER),
        content=[TextContent(text=json.dumps({'query': query, 'topn': topn}))],
        recipient='browser.search',
    )

    result_text: str = ''
    async for response in _browser_tool._process(harmony_message):
        if response.content:
            for content in response.content:
                if isinstance(content, TextContent):
                    result_text += content.text
    return result_text or f'No results for query: {query}'


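# Open a page: id selects a result from earlier output (or a URL), while
# cursor/loc/num_lines page through an already-open document; the defaults
# mirror the browser tool's own arguments.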
async def _browser_open_async(id: int | str = -1, cursor: int = -1, loc: int = -1, num_lines: int = -1, *, view_source: bool = False, source: str | None = None) -> str:
    payload = {'id': id, 'cursor': cursor, 'loc': loc, 'num_lines': num_lines, 'view_source': view_source, 'source': source}

    harmony_message = HarmonyMessage(
        author=Author(role=Role.USER),
        content=[TextContent(text=json.dumps(payload))],
        recipient='browser.open',
    )

    result_text: str = ''
    async for response in _browser_tool._process(harmony_message):
        if response.content:
            for content in response.content:
                if isinstance(content, TextContent):
                    result_text += content.text
    return result_text or f'Could not open: {id}'


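# Pattern search within the page currently open in the browser tool.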
async def _browser_find_async(pattern: str, cursor: int = -1) -> str:
    payload = {'pattern': pattern, 'cursor': cursor}

    harmony_message = HarmonyMessage(
        author=Author(role=Role.USER),
        content=[TextContent(text=json.dumps(payload))],
        recipient='browser.find',
    )

    result_text: str = ''
    async for response in _browser_tool._process(harmony_message):
        if response.content:
            for content in response.content:
                if isinstance(content, TextContent):
                    result_text += content.text
    return result_text or f'Pattern not found: {pattern}'


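# Synchronous wrappers: the model's tool calls are dispatched as plain function
# calls below, while the browser tool is async, so each wrapper drives its
# coroutine to completion with asyncio.run().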
def browser_search(query: str, topn: int = 10, source: Optional[str] = None) -> str:
    return asyncio.run(_browser_search_async(query=query, topn=topn, source=source))


def browser_open(id: int | str = -1, cursor: int = -1, loc: int = -1, num_lines: int = -1, *, view_source: bool = False, source: Optional[str] = None) -> str:
    return asyncio.run(_browser_open_async(id=id, cursor=cursor, loc=loc, num_lines=num_lines, view_source=view_source, source=source))


def browser_find(pattern: str, cursor: int = -1) -> str:
    return asyncio.run(_browser_find_async(pattern=pattern, cursor=cursor))


# Schema definitions for each browser tool. Only the names are declared:
# gpt-oss models are trained with the built-in browser tools, so no parameter
# schemas need to be spelled out here.
browser_search_schema = {
    'type': 'function',
    'function': {
        'name': 'browser.search',
    },
}

browser_open_schema = {
    'type': 'function',
    'function': {
        'name': 'browser.open',
    },
}

browser_find_schema = {
    'type': 'function',
    'function': {
        'name': 'browser.find',
    },
}

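# Dispatch table from the tool name the model emits to the local implementation.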
available_tools = {
    'browser.search': browser_search,
    'browser.open': browser_open,
    'browser.find': browser_find,
}


model = 'gpt-oss:20b'
print('Model: ', model, '\n')

prompt = 'What is Ollama?'
print('You: ', prompt, '\n')
messages = [{'role': 'user', 'content': prompt}]

client = Client()

# gpt-oss can call tools while "thinking", so we loop: stream a response, run
# any requested tools, append their results, and repeat until the model
# produces a final answer with no further tool calls.
final = True  # flips to False once the final (non-thinking) answer starts streaming
while True:
    response_stream: Iterator[ChatResponse] = client.chat(
        model=model,
        messages=messages,
        tools=[browser_search_schema, browser_open_schema, browser_find_schema],
        options={'num_ctx': 8192},  # 8192 is the recommended lower limit for the context window
        stream=True,
    )

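    # accumulators for this streamed response: tool calls, reasoning, and answer text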
    tool_calls = []
    thinking = ''
    content = ''

    for chunk in response_stream:
        if chunk.message.tool_calls:
            tool_calls.extend(chunk.message.tool_calls)

        if chunk.message.content:
            # thinking is None once the model has switched from reasoning to its answer
            if chunk.message.thinking is None and final:
                heading('\n\nFinal result: ')
                final = False
            content += chunk.message.content
            print(chunk.message.content, end='', flush=True)

        if chunk.message.thinking:
            thinking += chunk.message.thinking
            print(chunk.message.thinking, end='', flush=True)

    # record the assistant turn (answer, reasoning, and tool calls) so the
    # model keeps its own context across loop iterations
    if thinking or content or tool_calls:
        messages.append({'role': 'assistant', 'content': content, 'thinking': thinking, 'tool_calls': tool_calls})

    print()

    if tool_calls:
        for tool_call in tool_calls:
            tool_name = tool_call.function.name
            args = tool_call.function.arguments or {}
            function_to_call = available_tools.get(tool_name)

            if function_to_call:
                heading(f'\nCalling tool: {tool_name}')
                if args:
                    print(f'Arguments: {args}')

                try:
                    result = function_to_call(**args)
                    print(f'Tool result: {result[:200]}')
                    if len(result) > 200:
                        print('... [truncated]')
                    print()

                    result_message = {'role': 'tool', 'content': result, 'tool_name': tool_name}
                    messages.append(result_message)

                except Exception as e:
                    err = f'Error from {tool_name}: {e}'
                    print(err)
                    messages.append({'role': 'tool', 'content': err, 'tool_name': tool_name})
            else:
                print(f'Tool {tool_name} not found')
    else:
        # no more tool calls: the model has produced its final answer
        break