Commit de8ceeb

Fix bug using Ollama with Agents and None tool_calls in final message (#19844)
* Fix a bug with agents and Ollama: tool calls are retrieved from the Ollama message, but the value is None and the code then tries to check its length. The error occurs when you create an agent with streaming = false and give it an Ollama LLM.
* Added a test checking that get_tool_calls_from_response returns gracefully when no tools were called.
* Added more tests for achat and chat with tools. Also fixed flaky tests: chat sometimes still tried to use the song-generation tool to respond to a greeting even when told not to; switching to a math tool makes the result consistent, with the model not using the passed-in tool.
* Updated version to 0.7.3.
* Removed an unused tool from a test.
* Removed an unused test.
1 parent: 2c561eb · commit: de8ceeb
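The underlying cause is a standard Python pitfall: dict.get(key, default) only returns the default when the key is absent, not when the key is present with a None value. When Ollama's final message includes tool_calls set to None, .get("tool_calls", []) hands back None, and the later len() check raises a TypeError. A minimal standalone sketch of the failure and of the "or []" normalization this commit applies (plain Python, no Ollama needed):

# dict.get only falls back to the default for a MISSING key,
# not for a key that is present with value None.
message = {"role": "assistant", "content": "Hi!", "tool_calls": None}

tool_calls = message.get("tool_calls", [])
print(tool_calls)  # None -- the [] default is ignored because the key exists
# len(tool_calls) would raise: TypeError: object of type 'NoneType' has no len()

tool_calls = message.get("tool_calls", []) or []  # the fix: coerce None to []
print(len(tool_calls))  # 0 -- the length check is now safe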

3 files changed: +52 −4 lines

llama-index-integrations/llms/llama-index-llms-ollama/llama_index/llms/ollama/base.py

Lines changed: 3 additions & 3 deletions
@@ -308,7 +308,7 @@ def get_tool_calls_from_response(
         error_on_no_tool_call: bool = True,
     ) -> List[ToolSelection]:
         """Predict and call the tool."""
-        tool_calls = response.message.additional_kwargs.get("tool_calls", [])
+        tool_calls = response.message.additional_kwargs.get("tool_calls", []) or []
         if len(tool_calls) < 1:
             if error_on_no_tool_call:
                 raise ValueError(
@@ -353,7 +353,7 @@ def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
 
         response = dict(response)
 
-        tool_calls = response["message"].get("tool_calls", [])
+        tool_calls = response["message"].get("tool_calls", []) or []
         thinking = response["message"].get("thinking", None)
         token_counts = self._get_response_token_counts(response)
         if token_counts:
@@ -535,7 +535,7 @@ async def achat(
 
         response = dict(response)
 
-        tool_calls = response["message"].get("tool_calls", [])
+        tool_calls = response["message"].get("tool_calls", []) or []
         thinking = response["message"].get("thinking", None)
         token_counts = self._get_response_token_counts(response)
         if token_counts:
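With the or [] guard in place, a response whose message carries tool_calls=None now yields an empty list instead of crashing. A sketch of the post-fix behavior, constructing the ChatResponse by hand so no Ollama server is contacted (the model name is an illustrative assumption; requires llama-index-llms-ollama >= 0.7.3):

from llama_index.core.llms import ChatMessage, ChatResponse
from llama_index.llms.ollama import Ollama

# get_tool_calls_from_response only inspects the response object,
# so a hand-built ChatResponse is enough to exercise the fix offline.
llm = Ollama(model="llama3.1", context_window=1000)  # model name is illustrative

# Simulate a final assistant message where Ollama reported tool_calls as None.
response = ChatResponse(
    message=ChatMessage(
        role="assistant",
        content="Hello! How can I help?",
        additional_kwargs={"tool_calls": None},
    )
)

tool_calls = llm.get_tool_calls_from_response(response, error_on_no_tool_call=False)
assert tool_calls == []  # before 0.7.3 this raised TypeError on len(None)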

llama-index-integrations/llms/llama-index-llms-ollama/pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -27,7 +27,7 @@ dev = [
 
 [project]
 name = "llama-index-llms-ollama"
-version = "0.7.2"
+version = "0.7.3"
 description = "llama-index llms ollama integration"
 authors = [{name = "Your Name", email = "[email protected]"}]
 requires-python = ">=3.9,<4.0"
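The version bump publishes the fix, so downstream projects pick it up by requiring llama-index-llms-ollama >= 0.7.3.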

llama-index-integrations/llms/llama-index-llms-ollama/tests/test_llms_ollama.py

Lines changed: 48 additions & 0 deletions
@@ -208,3 +208,51 @@ async def test_async_chat_with_think() -> None:
     think = response.message.additional_kwargs.get("thinking", None)
     assert think is not None
     assert str(think).strip() != ""
+
+
+@pytest.mark.skipif(
+    client is None, reason="Ollama client is not available or test model is missing"
+)
+def test_chat_with_tools_returns_empty_array_if_no_tools_were_called() -> None:
+    """Make sure get_tool_calls_from_response can gracefully handle no tools in response"""
+    llm = Ollama(model=test_model, context_window=1000)
+    response = llm.chat(
+        tools=[],
+        messages=[
+            ChatMessage(
+                role="system",
+                content="You are a useful tool calling agent.",
+            ),
+            ChatMessage(role="user", content="Hello, how are you?"),
+        ],
+    )
+
+    assert response.message.additional_kwargs.get("tool_calls", []) == []
+
+    tool_calls = llm.get_tool_calls_from_response(response, error_on_no_tool_call=False)
+    assert len(tool_calls) == 0
+
+
+@pytest.mark.skipif(
+    client is None, reason="Ollama client is not available or test model is missing"
+)
+@pytest.mark.asyncio
+async def test_async_chat_with_tools_returns_empty_array_if_no_tools_were_called() -> (
+    None
+):
+    """
+    Test that achat returns [] for no tool calls since subsequent processes expect []
+    instead of None
+    """
+    llm = Ollama(model=test_model, context_window=1000)
+    response = await llm.achat(
+        tools=[],
+        messages=[
+            ChatMessage(
+                role="system",
+                content="You are a useful tool calling agent.",
+            ),
+            ChatMessage(role="user", content="Hello, how are you?"),
+        ],
+    )
+    assert response.message.additional_kwargs.get("tool_calls", []) == []
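For context, the failure scenario from the commit message, an agent driving a non-streaming Ollama LLM that answers a greeting without calling any tool, can be reproduced roughly as below. This is a hedged reconstruction, not code from the commit: it assumes a running Ollama server, an illustrative llama3.1 model, a hypothetical multiply tool, and the FunctionAgent API from llama_index.core.agent.workflow.

import asyncio

from llama_index.core.agent.workflow import FunctionAgent
from llama_index.core.tools import FunctionTool
from llama_index.llms.ollama import Ollama


def multiply(a: float, b: float) -> float:
    """Multiply two numbers."""
    return a * b


async def main() -> None:
    llm = Ollama(model="llama3.1", request_timeout=120.0)
    agent = FunctionAgent(tools=[FunctionTool.from_defaults(fn=multiply)], llm=llm)

    # A greeting gives the model no reason to call the tool, so the final
    # message can come back with tool_calls=None; before 0.7.3 this path
    # could fail with "object of type 'NoneType' has no len()".
    result = await agent.run("Hello, how are you?")
    print(result)


asyncio.run(main())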
