|
6 | 6 | from typing import List, Dict, Optional, Literal, Union, Iterator, cast |
7 | 7 | import json |
8 | 8 | import datetime |
| 9 | +import copy # Import copy for deepcopy |
9 | 10 |
|
10 | 11 | from n0mail import config |
11 | 12 | from n0mail.util.text_utils import clean_markdown_for_dense_information |
@@ -290,20 +291,43 @@ def get_chat_completion( |
290 | 291 | if temperature is not None: |
291 | 292 | options['temperature'] = temperature |
292 | 293 |
|
| 294 | + # --- Pre-process messages for Ollama client --- |
| 295 | + # The ollama client expects arguments within tool_calls to be dicts, |
| 296 | + # but our history (adapted from OpenAI format) stores them as strings. |
| 297 | + messages_for_ollama = copy.deepcopy(messages) # Avoid modifying original history |
| 298 | + for message in messages_for_ollama: |
| 299 | + if message.get('role') == 'assistant' and message.get('tool_calls'): |
| 300 | + new_tool_calls = [] |
| 301 | + for tool_call in message['tool_calls']: |
| 302 | + if isinstance(tool_call, dict) and isinstance(tool_call.get('function'), dict): |
| 303 | + func = tool_call['function'] |
| 304 | + if isinstance(func.get('arguments'), str): |
| 305 | + try: |
| 306 | + # Parse arguments string back to dict |
| 307 | + func['arguments'] = json.loads(func['arguments']) |
| 308 | + except json.JSONDecodeError: |
| 309 | + console.print(f"[yellow]Warning (Ollama):[/yellow] Could not parse tool call arguments JSON string in history message: {func.get('arguments')}") |
| 310 | + # If parsing fails, keep the arguments as the original string: |
| 311 | + # dropping the tool call would corrupt the history, though the |
| 312 | + # unparsed string may trigger the same client validation error. |
| 313 | + pass # Keep as string if parse fails |
| 314 | + # Append the potentially modified tool_call (or original if no change needed) |
| 315 | + # We assume the rest of the tool_call structure is compatible |
| 316 | + new_tool_calls.append(tool_call) |
| 317 | + else: |
| 318 | + # If tool_call format is unexpected, keep original |
| 319 | + new_tool_calls.append(tool_call) |
| 320 | + message['tool_calls'] = new_tool_calls # Update message with processed tool calls |
| 321 | + |
293 | 322 | chat_params = { |
294 | 323 | "model": model, |
295 | | - "messages": messages, |
| 324 | + "messages": messages_for_ollama, # Use the pre-processed messages |
296 | 325 | "stream": stream, |
297 | 326 | "options": options |
298 | 327 | } |
299 | 328 |
|
300 | | - # Pass tools if provided - using the OpenAI JSON schema format |
301 | 329 | if tools: |
302 | 330 | chat_params["tools"] = tools |
303 | | - # Note: Ollama python lib might not support explicit 'tool_choice' yet |
304 | | - # if tool_choice and tool_choice != "auto": |
305 | | - # chat_params["tool_choice"] = tool_choice |
306 | | - # For now, we rely on the model to decide based on the prompt and tools |
307 | 331 |
|
308 | 332 | retries = 0 |
309 | 333 | delay = initial_delay |
|
0 commit comments