12 | 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | 13 | # See the License for the specific language governing permissions and |
14 | 14 | # limitations under the License. |
| 15 | +from typing import Any |
15 | 16 | from unittest.mock import MagicMock, Mock, patch |
16 | 17 |
17 | 18 | import ollama |
18 | 19 | import pytest |
| 20 | +from neo4j_graphrag.exceptions import LLMGenerationError |
19 | 21 | from neo4j_graphrag.llm import LLMResponse |
20 | 22 | from neo4j_graphrag.llm.ollama_llm import OllamaLLM |
21 | 23 |
22 | 24 |
23 | | -def get_mock_ollama() -> MagicMock: |
24 | | - mock = MagicMock() |
25 | | - mock.ResponseError = ollama.ResponseError |
26 | | - return mock |
27 | | - |
28 | | - |
29 | | -@patch("builtins.__import__", side_effect=ImportError) |
30 | | -def test_ollama_llm_missing_dependency(mock_import: Mock) -> None: |
| 25 | +@patch("neo4j_graphrag.llm.ollama_llm.ollama", None) |
| 26 | +def test_ollama_llm_missing_dependency() -> None: |
31 | 27 | with pytest.raises(ImportError): |
32 | 28 | OllamaLLM(model_name="gpt-4o") |
33 | 29 |
34 | 30 |
35 | | -@patch("builtins.__import__") |
36 | | -def test_ollama_llm_happy_path(mock_import: Mock) -> None: |
37 | | - mock_ollama = get_mock_ollama() |
38 | | - mock_import.return_value = mock_ollama |
| 31 | +@patch("neo4j_graphrag.llm.ollama_llm.ollama") |
| 32 | +def test_ollama_llm_happy_path(mock_ollama: Mock) -> None: |
| 33 | + mock_ollama.Client.return_value.chat.return_value = MagicMock( |
| 34 | + message=MagicMock(content="ollama chat response"), |
| 35 | + ) |
| 36 | + model = "gpt" |
| 37 | + model_params = {"temperature": 0.3} |
| 38 | + system_instruction = "You are a helpful assistant." |
| 39 | + question = "What is graph RAG?" |
| 40 | + llm = OllamaLLM( |
| 41 | + model, |
| 42 | + model_params=model_params, |
| 43 | + system_instruction=system_instruction, |
| 44 | + ) |
| 45 | + |
| 46 | + res = llm.invoke(question) |
| 47 | + assert isinstance(res, LLMResponse) |
| 48 | + assert res.content == "ollama chat response" |
| 49 | + messages = [ |
| 50 | + {"role": "system", "content": system_instruction}, |
| 51 | + {"role": "user", "content": question}, |
| 52 | + ] |
| 53 | + llm.client.chat.assert_called_once_with( |
| 54 | + model=model, messages=messages, options=model_params |
| 55 | + ) |
| 56 | + |
| 57 | + |
| 58 | +@patch("neo4j_graphrag.llm.ollama_llm.ollama") |
| 59 | +def test_ollama_invoke_with_chat_history_happy_path(mock_ollama: Mock) -> None: |
| 60 | + mock_ollama.Client.return_value.chat.return_value = MagicMock( |
| 61 | + message=MagicMock(content="ollama chat response"), |
| 62 | + ) |
| 63 | + model = "gpt" |
| 64 | + model_params = {"temperature": 0.3} |
| 65 | + system_instruction = "You are a helpful assistant." |
| 66 | + llm = OllamaLLM( |
| 67 | + model, |
| 68 | + model_params=model_params, |
| 69 | + system_instruction=system_instruction, |
| 70 | + ) |
| 71 | + chat_history = [ |
| 72 | + {"role": "user", "content": "When does the sun come up in the summer?"}, |
| 73 | + {"role": "assistant", "content": "Usually around 6am."}, |
| 74 | + ] |
| 75 | + question = "What about next season?" |
| 76 | + |
| 77 | + response = llm.invoke(question, chat_history) |
| 78 | + assert response.content == "ollama chat response" |
| 79 | + messages = [{"role": "system", "content": system_instruction}] |
| 80 | + messages.extend(chat_history) |
| 81 | + messages.append({"role": "user", "content": question}) |
| 82 | + llm.client.chat.assert_called_once_with( |
| 83 | + model=model, messages=messages, options=model_params |
| 84 | + ) |
| 85 | + |
| 86 | + |
| 87 | +@patch("neo4j_graphrag.llm.ollama_llm.ollama") |
| 88 | +def test_ollama_invoke_with_chat_history_validation_error( |
| 89 | + mock_ollama: Mock, |
| 90 | +) -> None: |
39 | 91 | mock_ollama.Client.return_value.chat.return_value = MagicMock( |
40 | 92 | message=MagicMock(content="ollama chat response"), |
41 | 93 | ) |
42 | | - llm = OllamaLLM(model_name="gpt") |
| 94 | + mock_ollama.ResponseError = ollama.ResponseError |
| 95 | + model = "gpt" |
| 96 | + model_params = {"temperature": 0.3} |
| 97 | + system_instruction = "You are a helpful assistant." |
| 98 | + llm = OllamaLLM( |
| 99 | + model, |
| 100 | + model_params=model_params, |
| 101 | + system_instruction=system_instruction, |
| 102 | + ) |
| 103 | + chat_history = [ |
| 104 | + {"role": "human", "content": "When does the sun come up in the summer?"}, |
| 105 | + {"role": "assistant", "content": "Usually around 6am."}, |
| 106 | + ] |
| 107 | + question = "What about next season?" |
| 108 | + |
| 109 | + with pytest.raises(LLMGenerationError) as exc_info: |
| 110 | + llm.invoke(question, chat_history) |
| 111 | + assert "Input should be 'user', 'assistant' or 'system'" in str(exc_info.value)
| 112 | + |
| 113 | + |
| 114 | +@pytest.mark.asyncio |
| 115 | +@patch("neo4j_graphrag.llm.ollama_llm.ollama") |
| 116 | +async def test_ollama_ainvoke_happy_path(mock_ollama: Mock) -> None: |
| 117 | + async def mock_chat_async(*args: Any, **kwargs: Any) -> MagicMock: |
| 118 | + return MagicMock( |
| 119 | + message=MagicMock(content="ollama chat response"), |
| 120 | + ) |
| 121 | + |
| 122 | + mock_ollama.AsyncClient.return_value.chat = mock_chat_async |
| 123 | + model = "gpt" |
| 124 | + model_params = {"temperature": 0.3} |
| 125 | + system_instruction = "You are a helpful assistant." |
| 126 | + question = "What is graph RAG?" |
| 127 | + llm = OllamaLLM( |
| 128 | + model, |
| 129 | + model_params=model_params, |
| 130 | + system_instruction=system_instruction, |
| 131 | + ) |
43 | 132 |
44 | | - res = llm.invoke("my text") |
| 133 | + res = await llm.ainvoke(question) |
45 | 134 | assert isinstance(res, LLMResponse) |
46 | 135 | assert res.content == "ollama chat response" |
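
For context on the mocking change: the rewritten tests no longer intercept `builtins.__import__`; they patch the `ollama` name inside `neo4j_graphrag.llm.ollama_llm` directly, with `None` to simulate a missing dependency and with a `Mock` whose `Client` / `AsyncClient` stand in for the real SDK. The snippet below is a minimal sketch, not the library source, of the class shape these assertions imply; the guarded module-level import, the `get_messages` helper, and the role validation noted in the comments are assumptions made only for illustration.

```python
# Sketch of the OllamaLLM structure assumed by the tests above (hypothetical).
from typing import Any, Optional

from neo4j_graphrag.exceptions import LLMGenerationError
from neo4j_graphrag.llm import LLMResponse

try:
    import ollama  # the tests patch this name with None or a Mock
except ImportError:
    ollama = None  # type: ignore[assignment]


class OllamaLLMSketch:
    def __init__(
        self,
        model_name: str,
        model_params: Optional[dict[str, Any]] = None,
        system_instruction: Optional[str] = None,
    ) -> None:
        # Patching the module attribute to None makes this branch raise,
        # which is what test_ollama_llm_missing_dependency checks.
        if ollama is None:
            raise ImportError("Could not import the ollama python client.")
        self.model_name = model_name
        self.model_params = model_params or {}
        self.system_instruction = system_instruction
        # The mocked ollama.Client / ollama.AsyncClient are instantiated here,
        # so llm.client.chat is the Mock asserted on in the tests.
        self.client = ollama.Client()
        self.async_client = ollama.AsyncClient()

    def get_messages(
        self,
        input: str,
        chat_history: Optional[list[dict[str, str]]] = None,
    ) -> list[dict[str, str]]:
        messages: list[dict[str, str]] = []
        if self.system_instruction:
            messages.append({"role": "system", "content": self.system_instruction})
        if chat_history:
            # The real class presumably validates roles here (e.g. with a
            # pydantic message model) and wraps failures in LLMGenerationError,
            # which is why role "human" fails the validation-error test.
            for message in chat_history:
                if message.get("role") not in ("user", "assistant", "system"):
                    raise LLMGenerationError(
                        "Input should be 'user', 'assistant' or 'system'"
                    )
                messages.append(message)
        messages.append({"role": "user", "content": input})
        return messages

    def invoke(
        self,
        input: str,
        chat_history: Optional[list[dict[str, str]]] = None,
    ) -> LLMResponse:
        response = self.client.chat(
            model=self.model_name,
            messages=self.get_messages(input, chat_history),
            options=self.model_params,
        )
        return LLMResponse(content=response.message.content)

    async def ainvoke(
        self,
        input: str,
        chat_history: Optional[list[dict[str, str]]] = None,
    ) -> LLMResponse:
        response = await self.async_client.chat(
            model=self.model_name,
            messages=self.get_messages(input, chat_history),
            options=self.model_params,
        )
        return LLMResponse(content=response.message.content)
```

This is only meant to show why patching `neo4j_graphrag.llm.ollama_llm.ollama` is sufficient: the client objects and the exception type are all resolved through that single module attribute, so no import-machinery patching is needed.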