"""Test chat model integration."""

import json
+import logging
from collections.abc import Generator
from contextlib import contextmanager
from typing import Any
-from unittest.mock import patch
+from unittest.mock import MagicMock, patch

import pytest
from httpx import Client, Request, Response
from langchain_core.exceptions import OutputParserException
-from langchain_core.messages import ChatMessage
+from langchain_core.messages import ChatMessage, HumanMessage
from langchain_tests.unit_tests import ChatModelUnitTests

from langchain_ollama.chat_models import (

@@ -140,3 +141,130 @@ def test_parse_json_string_skip_returns_input_on_failure() -> None:
        skip=True,
    )
    assert result == malformed_string
+
+
+def test_load_response_with_empty_content_is_skipped(
+    caplog: pytest.LogCaptureFixture,
+) -> None:
+    """Test that load responses with empty content log a warning and are skipped."""
+    load_only_response = [
+        {
+            "model": "test-model",
+            "created_at": "2025-01-01T00:00:00.000000000Z",
+            "done": True,
+            "done_reason": "load",
+            "message": {"role": "assistant", "content": ""},
+        }
+    ]
+
+    with patch("langchain_ollama.chat_models.Client") as mock_client_class:
+        mock_client = MagicMock()
+        mock_client_class.return_value = mock_client
+        mock_client.chat.return_value = load_only_response
+
+        llm = ChatOllama(model="test-model")
+
+        with (
+            caplog.at_level(logging.WARNING),
+            pytest.raises(ValueError, match="No data received from Ollama stream"),
+        ):
+            llm.invoke([HumanMessage("Hello")])
+
+        assert "Ollama returned empty response with done_reason='load'" in caplog.text
+
+
+def test_load_response_with_whitespace_content_is_skipped(
+    caplog: pytest.LogCaptureFixture,
+) -> None:
+    """Test load responses w/ only whitespace content log a warning and are skipped."""
+    load_whitespace_response = [
+        {
+            "model": "test-model",
+            "created_at": "2025-01-01T00:00:00.000000000Z",
+            "done": True,
+            "done_reason": "load",
+            "message": {"role": "assistant", "content": " \n \t "},
+        }
+    ]
+
+    with patch("langchain_ollama.chat_models.Client") as mock_client_class:
+        mock_client = MagicMock()
+        mock_client_class.return_value = mock_client
+        mock_client.chat.return_value = load_whitespace_response
+
+        llm = ChatOllama(model="test-model")
+
+        with (
+            caplog.at_level(logging.WARNING),
+            pytest.raises(ValueError, match="No data received from Ollama stream"),
+        ):
+            llm.invoke([HumanMessage("Hello")])
+        assert "Ollama returned empty response with done_reason='load'" in caplog.text
+
+
+def test_load_followed_by_content_response(
+    caplog: pytest.LogCaptureFixture,
+) -> None:
+    """Test load responses log a warning and are skipped when followed by content."""
+    load_then_content_response = [
+        {
+            "model": "test-model",
+            "created_at": "2025-01-01T00:00:00.000000000Z",
+            "done": True,
+            "done_reason": "load",
+            "message": {"role": "assistant", "content": ""},
+        },
+        {
+            "model": "test-model",
+            "created_at": "2025-01-01T00:00:01.000000000Z",
+            "done": True,
+            "done_reason": "stop",
+            "message": {
+                "role": "assistant",
+                "content": "Hello! How can I help you today?",
+            },
+        },
+    ]
+
+    with patch("langchain_ollama.chat_models.Client") as mock_client_class:
+        mock_client = MagicMock()
+        mock_client_class.return_value = mock_client
+        mock_client.chat.return_value = load_then_content_response
+
+        llm = ChatOllama(model="test-model")
+
+        with caplog.at_level(logging.WARNING):
+            result = llm.invoke([HumanMessage("Hello")])
+
+        assert "Ollama returned empty response with done_reason='load'" in caplog.text
+        assert result.content == "Hello! How can I help you today?"
+        assert result.response_metadata.get("done_reason") == "stop"
+
+
+def test_load_response_with_actual_content_is_not_skipped(
+    caplog: pytest.LogCaptureFixture,
+) -> None:
+    """Test load responses with actual content are NOT skipped and log no warning."""
+    load_with_content_response = [
+        {
+            "model": "test-model",
+            "created_at": "2025-01-01T00:00:00.000000000Z",
+            "done": True,
+            "done_reason": "load",
+            "message": {"role": "assistant", "content": "This is actual content"},
+        }
+    ]
+
+    with patch("langchain_ollama.chat_models.Client") as mock_client_class:
+        mock_client = MagicMock()
+        mock_client_class.return_value = mock_client
+        mock_client.chat.return_value = load_with_content_response
+
+        llm = ChatOllama(model="test-model")
+
+        with caplog.at_level(logging.WARNING):
+            result = llm.invoke([HumanMessage("Hello")])
+
+        assert result.content == "This is actual content"
+        assert result.response_metadata.get("done_reason") == "load"
+        assert not caplog.text
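
For orientation, the behavior these tests pin down is roughly: while iterating over the Ollama stream, a chunk whose done_reason is "load" and whose message content is empty or whitespace-only is logged as a warning and skipped; if no usable chunk ever arrives, a ValueError is raised. Below is a minimal standalone sketch of that logic under those assumptions, not the actual langchain_ollama implementation; the helper name iter_usable_chunks is invented for illustration.

import logging
from collections.abc import Iterable, Iterator

logger = logging.getLogger(__name__)


def iter_usable_chunks(stream: Iterable[dict]) -> Iterator[dict]:
    """Illustrative sketch: skip empty 'load' warm-up chunks, raise if nothing usable arrives."""
    received = False
    for chunk in stream:
        content = chunk.get("message", {}).get("content", "")
        if chunk.get("done_reason") == "load" and not content.strip():
            # Warm-up response with no text: warn and skip, matching what the tests assert.
            logger.warning("Ollama returned empty response with done_reason='load'")
            continue
        received = True
        yield chunk
    if not received:
        # Mirrors the error the first two tests expect when only empty load chunks arrive.
        raise ValueError("No data received from Ollama stream.")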