Skip to content

Commit 572020c

Browse files
authored
ollama: add validate_model_on_init, catch more errors (#31784)
* Ensure access to local model during `ChatOllama` instantiation (#27720). This adds a new param `validate_model_on_init` (default: `false`) * Catch a few more errors from the Ollama client to assist users
1 parent 1a3a8db commit 572020c

File tree

10 files changed

+188
-3
lines changed

10 files changed

+188
-3
lines changed
Lines changed: 37 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,37 @@
1+
"""Utility functions for validating Ollama models."""
2+
3+
from httpx import ConnectError
4+
from ollama import Client, ResponseError
5+
6+
7+
def validate_model(client: Client, model_name: str) -> None:
    """Validate that a model exists in the Ollama instance.

    Args:
        client: The Ollama client.
        model_name: The name of the model to validate.

    Raises:
        ValueError: If the model is not found or if there's a connection issue.
    """
    try:
        response = client.list()

        # ollama<0.4 returns plain dicts keyed by "name"; ollama>=0.4 returns
        # a typed ListResponse whose entries are Model objects exposing the
        # tag via the `.model` attribute. Accept both shapes so validation
        # does not crash across client versions.
        model_names: list[str] = []
        for model_info in response["models"]:
            if isinstance(model_info, dict):
                name = model_info.get("model") or model_info.get("name")
            else:
                name = getattr(model_info, "model", None)
            if name:
                model_names.append(name)

        # A bare name (e.g. `llama3`) matches any locally pulled tag of that
        # model (e.g. `llama3:8b`); a fully qualified `name:tag` must match
        # exactly.
        if not any(
            model_name == m or m.startswith(f"{model_name}:") for m in model_names
        ):
            raise ValueError(
                f"Model `{model_name}` not found in Ollama. Please pull the "
                f"model (using `ollama pull {model_name}`) or specify a valid "
                f"model name. Available local models: {', '.join(model_names)}"
            )
    except ConnectError as e:
        # NOTE(review): `client._client` is the underlying httpx client (a
        # private attribute of ollama.Client) — used only to surface the
        # configured base URL in the error message; confirm it stays stable
        # across ollama-python releases.
        raise ValueError(
            "Connection to Ollama failed. Please make sure Ollama is running "
            f"and accessible at {client._client.base_url}. "
        ) from e
    except ResponseError as e:
        raise ValueError(
            "Received an error from the Ollama API. "
            "Please check your Ollama server logs."
        ) from e

libs/partners/ollama/langchain_ollama/chat_models.py

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -55,6 +55,8 @@
5555
from pydantic.v1 import BaseModel as BaseModelV1
5656
from typing_extensions import Self, is_typeddict
5757

58+
from ._utils import validate_model
59+
5860
DEFAULT_THINK_TOKEN_START: Final[str] = "<think>"
5961
DEFAULT_THINK_TOKEN_END: Final[str] = "</think>"
6062

@@ -350,6 +352,9 @@ class Multiply(BaseModel):
350352
model: str
351353
"""Model name to use."""
352354

355+
validate_model_on_init: bool = False
356+
"""Whether to validate the model exists in Ollama locally on initialization."""
357+
353358
extract_reasoning: Optional[Union[bool, tuple[str, str]]] = False
354359
"""Whether to extract the reasoning tokens in think blocks.
355360
Extracts `chunk.content` to `chunk.additional_kwargs.reasoning_content`.
@@ -529,6 +534,8 @@ def _set_clients(self) -> Self:
529534

530535
self._client = Client(host=self.base_url, **sync_client_kwargs)
531536
self._async_client = AsyncClient(host=self.base_url, **async_client_kwargs)
537+
if self.validate_model_on_init:
538+
validate_model(self._client, self.model)
532539
return self
533540

534541
def _convert_messages_to_ollama_messages(
@@ -1226,7 +1233,7 @@ class AnswerWithJustification(BaseModel):
12261233
"schema": schema,
12271234
},
12281235
)
1229-
output_parser = PydanticOutputParser(pydantic_object=schema)
1236+
output_parser = PydanticOutputParser(pydantic_object=schema) # type: ignore[arg-type]
12301237
else:
12311238
if is_typeddict(schema):
12321239
response_format = convert_to_json_schema(schema)

libs/partners/ollama/langchain_ollama/embeddings.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,8 @@
1212
)
1313
from typing_extensions import Self
1414

15+
from ._utils import validate_model
16+
1517

1618
class OllamaEmbeddings(BaseModel, Embeddings):
1719
"""Ollama embedding model integration.
@@ -123,6 +125,9 @@ class OllamaEmbeddings(BaseModel, Embeddings):
123125
model: str
124126
"""Model name to use."""
125127

128+
validate_model_on_init: bool = False
129+
"""Whether to validate the model exists in ollama locally on initialization."""
130+
126131
base_url: Optional[str] = None
127132
"""Base url the model is hosted under."""
128133

@@ -259,6 +264,8 @@ def _set_clients(self) -> Self:
259264

260265
self._client = Client(host=self.base_url, **sync_client_kwargs)
261266
self._async_client = AsyncClient(host=self.base_url, **async_client_kwargs)
267+
if self.validate_model_on_init:
268+
validate_model(self._client, self.model)
262269
return self
263270

264271
def embed_documents(self, texts: list[str]) -> list[list[float]]:

libs/partners/ollama/langchain_ollama/llms.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,8 @@
1818
from pydantic import PrivateAttr, model_validator
1919
from typing_extensions import Self
2020

21+
from ._utils import validate_model
22+
2123

2224
class OllamaLLM(BaseLLM):
2325
"""OllamaLLM large language models.
@@ -34,6 +36,9 @@ class OllamaLLM(BaseLLM):
3436
model: str
3537
"""Model name to use."""
3638

39+
validate_model_on_init: bool = False
40+
"""Whether to validate the model exists in ollama locally on initialization."""
41+
3742
mirostat: Optional[int] = None
3843
"""Enable Mirostat sampling for controlling perplexity.
3944
(default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)"""
@@ -215,6 +220,8 @@ def _set_clients(self) -> Self:
215220

216221
self._client = Client(host=self.base_url, **sync_client_kwargs)
217222
self._async_client = AsyncClient(host=self.base_url, **async_client_kwargs)
223+
if self.validate_model_on_init:
224+
validate_model(self._client, self.model)
218225
return self
219226

220227
async def _acreate_generate_stream(
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
interactions:
2+
- request:
3+
body: ''
4+
headers:
5+
accept:
6+
- application/json
7+
accept-encoding:
8+
- gzip, deflate, zstd
9+
connection:
10+
- keep-alive
11+
content-type:
12+
- application/json
13+
host:
14+
- 127.0.0.1:11434
15+
user-agent:
16+
- ollama-python/0.5.1 (arm64 darwin) Python/3.10.16
17+
method: GET
18+
uri: http://127.0.0.1:11434/api/tags
19+
response:
20+
body:
21+
string: '{"models":[{"name":"deepseek-r1:8b","model":"deepseek-r1:8b","modified_at":"2025-06-28T01:12:36.619720716-04:00","size":5225376047,"digest":"6995872bfe4c521a67b32da386cd21d5c6e819b6e0d62f79f64ec83be99f5763","details":{"parent_model":"","format":"gguf","family":"qwen3","families":["qwen3"],"parameter_size":"8.2B","quantization_level":"Q4_K_M"}},{"name":"deepseek-r1:1.5b","model":"deepseek-r1:1.5b","modified_at":"2025-06-28T01:12:14.502483098-04:00","size":1117322768,"digest":"e0979632db5a88d1a53884cb2a941772d10ff5d055aabaa6801c4e36f3a6c2d7","details":{"parent_model":"","format":"gguf","family":"qwen2","families":["qwen2"],"parameter_size":"1.8B","quantization_level":"Q4_K_M"}},{"name":"granite3.2:8b","model":"granite3.2:8b","modified_at":"2025-06-25T14:56:40.551100022-04:00","size":4942877287,"digest":"9bcb3335083f7eecc742d3916da858f66e6ba8dc450a233270f37ba2ecec6c79","details":{"parent_model":"","format":"gguf","family":"granite","families":["granite"],"parameter_size":"8.2B","quantization_level":"Q4_K_M"}},{"name":"bakllava:latest","model":"bakllava:latest","modified_at":"2025-06-25T14:53:32.313094104-04:00","size":4733351307,"digest":"3dd68bd4447cba20e20deba918749e7f58ff689a8ba4a90c9ff9dc9118037486","details":{"parent_model":"","format":"gguf","family":"llama","families":["llama","clip"],"parameter_size":"7B","quantization_level":"Q4_0"}},{"name":"qwen3:14b","model":"qwen3:14b","modified_at":"2025-06-24T15:23:01.652116724-04:00","size":9276198565,"digest":"bdbd181c33f2ed1b31c972991882db3cf4d192569092138a7d29e973cd9debe8","details":{"parent_model":"","format":"gguf","family":"qwen3","families":["qwen3"],"parameter_size":"14.8B","quantization_level":"Q4_K_M"}},{"name":"deepseek-r1:latest","model":"deepseek-r1:latest","modified_at":"2025-06-24T14:38:30.266396429-04:00","size":5225376047,"digest":"6995872bfe4c521a67b32da386cd21d5c6e819b6e0d62f79f64ec83be99f5763","details":{"parent_model":"","format":"gguf","family":"qwen3","families":["qwen3"],"parameter_size"
:"8.2B","quantization_level":"Q4_K_M"}},{"name":"gemma3:latest","model":"gemma3:latest","modified_at":"2025-06-24T14:00:47.814400435-04:00","size":3338801804,"digest":"a2af6cc3eb7fa8be8504abaf9b04e88f17a119ec3f04a3addf55f92841195f5a","details":{"parent_model":"","format":"gguf","family":"gemma3","families":["gemma3"],"parameter_size":"4.3B","quantization_level":"Q4_K_M"}},{"name":"qwen3:8b","model":"qwen3:8b","modified_at":"2025-06-24T13:41:32.032308856-04:00","size":5225388164,"digest":"500a1f067a9f782620b40bee6f7b0c89e17ae61f686b92c24933e4ca4b2b8b41","details":{"parent_model":"","format":"gguf","family":"qwen3","families":["qwen3"],"parameter_size":"8.2B","quantization_level":"Q4_K_M"}},{"name":"llama4:latest","model":"llama4:latest","modified_at":"2025-06-24T11:56:25.773177793-04:00","size":67436862523,"digest":"bf31604e25c25d964e250bcf28a82bfbdbe88af5f236257fabb27629bb24c7f3","details":{"parent_model":"","format":"gguf","family":"llama4","families":["llama4"],"parameter_size":"108.6B","quantization_level":"Q4_K_M"}},{"name":"granite3.2-vision:latest","model":"granite3.2-vision:latest","modified_at":"2025-06-24T11:19:40.600433668-04:00","size":2437852465,"digest":"3be41a661804ad72cd08269816c5a145f1df6479ad07e2b3a7e29dba575d2669","details":{"parent_model":"","format":"gguf","family":"granite","families":["granite","clip"],"parameter_size":"2.5B","quantization_level":"Q4_K_M"}},{"name":"mistral-small3.2:latest","model":"mistral-small3.2:latest","modified_at":"2025-06-24T11:16:17.938210984-04:00","size":15177384862,"digest":"5a408ab55df5c1b5cf46533c368813b30bf9e4d8fc39263bf2a3338cfa3b895b","details":{"parent_model":"","format":"gguf","family":"mistral3","families":["mistral3"],"parameter_size":"24.0B","quantization_level":"Q4_K_M"}},{"name":"mistral-small3.1:latest","model":"mistral-small3.1:latest","modified_at":"2025-06-24T11:07:35.44539952-04:00","size":15486899116,"digest":"b9aaf0c2586a8ed8105feab808c0f034bd4d346203822f048e2366165a13f4ea","details":{"parent_mode
l":"","format":"gguf","family":"mistral3","families":["mistral3"],"parameter_size":"24.0B","quantization_level":"Q4_K_M"}},{"name":"gemma3:4b","model":"gemma3:4b","modified_at":"2025-06-23T17:23:28.663213497-04:00","size":3338801804,"digest":"a2af6cc3eb7fa8be8504abaf9b04e88f17a119ec3f04a3addf55f92841195f5a","details":{"parent_model":"","format":"gguf","family":"gemma3","families":["gemma3"],"parameter_size":"4.3B","quantization_level":"Q4_K_M"}},{"name":"llama3:latest","model":"llama3:latest","modified_at":"2025-06-23T17:20:14.737102442-04:00","size":4661224676,"digest":"365c0bd3c000a25d28ddbf732fe1c6add414de7275464c4e4d1c3b5fcb5d8ad1","details":{"parent_model":"","format":"gguf","family":"llama","families":["llama"],"parameter_size":"8.0B","quantization_level":"Q4_0"}},{"name":"llama3.1:latest","model":"llama3.1:latest","modified_at":"2025-06-23T17:15:26.037326254-04:00","size":4920753328,"digest":"46e0c10c039e019119339687c3c1757cc81b9da49709a3b3924863ba87ca666e","details":{"parent_model":"","format":"gguf","family":"llama","families":["llama"],"parameter_size":"8.0B","quantization_level":"Q4_K_M"}},{"name":"llama3.2:latest","model":"llama3.2:latest","modified_at":"2025-06-23T17:01:52.264371207-04:00","size":2019393189,"digest":"a80c4f17acd55265feec403c7aef86be0c25983ab279d83f3bcd3abbcb5b8b72","details":{"parent_model":"","format":"gguf","family":"llama","families":["llama"],"parameter_size":"3.2B","quantization_level":"Q4_K_M"}}]}'
22+
headers:
23+
Content-Type:
24+
- application/json; charset=utf-8
25+
Date:
26+
- Sat, 28 Jun 2025 21:08:54 GMT
27+
Transfer-Encoding:
28+
- chunked
29+
status:
30+
code: 200
31+
message: OK
32+
version: 1

libs/partners/ollama/tests/integration_tests/chat_models/test_chat_models_standard.py

Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,13 @@
11
"""Test chat model integration using standard integration tests."""
22

3+
from unittest.mock import MagicMock, patch
4+
35
import pytest
6+
from httpx import ConnectError
47
from langchain_core.language_models import BaseChatModel
58
from langchain_tests.integration_tests import ChatModelIntegrationTests
9+
from ollama import ResponseError
10+
from pydantic import ValidationError
611

712
from langchain_ollama.chat_models import ChatOllama
813

@@ -47,3 +52,29 @@ def test_tool_calling(self, model: BaseChatModel) -> None:
4752
)
4853
async def test_tool_calling_async(self, model: BaseChatModel) -> None:
4954
await super().test_tool_calling_async(model)
55+
56+
@patch("langchain_ollama.chat_models.Client.list")
57+
def test_init_model_not_found(self, mock_list: MagicMock) -> None:
58+
"""Test that a ValueError is raised when the model is not found."""
59+
mock_list.side_effect = ValueError("Test model not found")
60+
with pytest.raises(ValueError) as excinfo:
61+
ChatOllama(model="non-existent-model", validate_model_on_init=True)
62+
assert "Test model not found" in str(excinfo.value)
63+
64+
@patch("langchain_ollama.chat_models.Client.list")
65+
def test_init_connection_error(self, mock_list: MagicMock) -> None:
66+
"""Test that a ValidationError is raised on connect failure during init."""
67+
mock_list.side_effect = ConnectError("Test connection error")
68+
69+
with pytest.raises(ValidationError) as excinfo:
70+
ChatOllama(model="any-model", validate_model_on_init=True)
71+
assert "not found in Ollama" in str(excinfo.value)
72+
73+
@patch("langchain_ollama.chat_models.Client.list")
74+
def test_init_response_error(self, mock_list: MagicMock) -> None:
75+
"""Test that a ResponseError is raised."""
76+
mock_list.side_effect = ResponseError("Test response error")
77+
78+
with pytest.raises(ValidationError) as excinfo:
79+
ChatOllama(model="any-model", validate_model_on_init=True)
80+
assert "Received an error from the Ollama API" in str(excinfo.value)

libs/partners/ollama/tests/unit_tests/test_chat_models.py

Lines changed: 21 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
from collections.abc import Generator
55
from contextlib import contextmanager
66
from typing import Any
7+
from unittest.mock import patch
78

89
import pytest
910
from httpx import Client, Request, Response
@@ -12,6 +13,8 @@
1213

1314
from langchain_ollama.chat_models import ChatOllama, _parse_arguments_from_tool_call
1415

16+
MODEL_NAME = "llama3.1"
17+
1518

1619
class TestChatOllama(ChatModelUnitTests):
1720
@property
@@ -49,7 +52,7 @@ def test_arbitrary_roles_accepted_in_chatmessages(
4952

5053
llm = ChatOllama(
5154
base_url="http://whocares:11434",
52-
model="granite3.2",
55+
model=MODEL_NAME,
5356
verbose=True,
5457
format=None,
5558
)
@@ -64,3 +67,20 @@ def test_arbitrary_roles_accepted_in_chatmessages(
6467
]
6568

6669
llm.invoke(messages)
70+
71+
72+
@patch("langchain_ollama.chat_models.validate_model")
73+
def test_validate_model_on_init(mock_validate_model: Any) -> None:
74+
"""Test that the model is validated on initialization when requested."""
75+
# Test that validate_model is called when validate_model_on_init=True
76+
ChatOllama(model=MODEL_NAME, validate_model_on_init=True)
77+
mock_validate_model.assert_called_once()
78+
mock_validate_model.reset_mock()
79+
80+
# Test that validate_model is NOT called when validate_model_on_init=False
81+
ChatOllama(model=MODEL_NAME, validate_model_on_init=False)
82+
mock_validate_model.assert_not_called()
83+
84+
# Test that validate_model is NOT called by default
85+
ChatOllama(model=MODEL_NAME)
86+
mock_validate_model.assert_not_called()
Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,30 @@
11
"""Test embedding model integration."""
22

3+
from typing import Any
4+
from unittest.mock import patch
5+
36
from langchain_ollama.embeddings import OllamaEmbeddings
47

8+
MODEL_NAME = "llama3.1"
9+
510

611
def test_initialization() -> None:
712
"""Test embedding model initialization."""
813
OllamaEmbeddings(model="llama3", keep_alive=1)
14+
15+
16+
@patch("langchain_ollama.embeddings.validate_model")
17+
def test_validate_model_on_init(mock_validate_model: Any) -> None:
18+
"""Test that the model is validated on initialization when requested."""
19+
# Test that validate_model is called when validate_model_on_init=True
20+
OllamaEmbeddings(model=MODEL_NAME, validate_model_on_init=True)
21+
mock_validate_model.assert_called_once()
22+
mock_validate_model.reset_mock()
23+
24+
# Test that validate_model is NOT called when validate_model_on_init=False
25+
OllamaEmbeddings(model=MODEL_NAME, validate_model_on_init=False)
26+
mock_validate_model.assert_not_called()
27+
28+
# Test that validate_model is NOT called by default
29+
OllamaEmbeddings(model=MODEL_NAME)
30+
mock_validate_model.assert_not_called()

libs/partners/ollama/tests/unit_tests/test_llms.py

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,12 @@
11
"""Test Ollama Chat API wrapper."""
22

3+
from typing import Any
4+
from unittest.mock import patch
5+
36
from langchain_ollama import OllamaLLM
47

8+
MODEL_NAME = "llama3.1"
9+
510

611
def test_initialization() -> None:
712
"""Test integration initialization."""
@@ -26,3 +31,20 @@ def test_model_params() -> None:
2631
"ls_model_name": "llama3",
2732
"ls_max_tokens": 3,
2833
}
34+
35+
36+
@patch("langchain_ollama.llms.validate_model")
37+
def test_validate_model_on_init(mock_validate_model: Any) -> None:
38+
"""Test that the model is validated on initialization when requested."""
39+
# Test that validate_model is called when validate_model_on_init=True
40+
OllamaLLM(model=MODEL_NAME, validate_model_on_init=True)
41+
mock_validate_model.assert_called_once()
42+
mock_validate_model.reset_mock()
43+
44+
# Test that validate_model is NOT called when validate_model_on_init=False
45+
OllamaLLM(model=MODEL_NAME, validate_model_on_init=False)
46+
mock_validate_model.assert_not_called()
47+
48+
# Test that validate_model is NOT called by default
49+
OllamaLLM(model=MODEL_NAME)
50+
mock_validate_model.assert_not_called()

libs/partners/ollama/uv.lock

Lines changed: 1 addition & 1 deletion
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

0 commit comments

Comments
 (0)