
Commit 4ed6c0f

[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
1 parent 1eb22ad commit 4ed6c0f
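
Every change below is a formatting fix: over-long assert_called_once_with calls are wrapped, short multi-line calls are collapsed onto one line, spaces are added inside dict literals, and a trailing comma is added to a multi-line argument list. The pattern matches a Black-style formatter with an extended line length and string normalization disabled (single-quoted strings survive untouched). As a rough sketch, a .pre-commit-config.yaml along these lines could produce such fixes; the repository's actual hook list and version pins are assumptions, not shown in this commit:

# Hypothetical .pre-commit-config.yaml; the real hooks are not visible in this diff.
repos:
  - repo: https://github.com/psf/black
    rev: 24.4.2  # assumed version pin
    hooks:
      - id: black
        args: [--line-length=120, --skip-string-normalization]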

File tree

4 files changed (+30, -36 lines)


test/test_cli_mode.py

Lines changed: 15 additions & 17 deletions
@@ -108,7 +108,9 @@ def test_cli_mode_uses_llamacpp_when_available(self):
 
         assert result == "CLI response from DeepSeek V3"
         self.mock_available.assert_called_once_with(TEST_LLAMACPP_MODEL)
-        self.mock_chat_llamacpp.assert_called_once_with(TEST_LLAMACPP_MODEL, 'Write a function', system_prompt=None, image_files=None, json_schema=None)
+        self.mock_chat_llamacpp.assert_called_once_with(
+            TEST_LLAMACPP_MODEL, 'Write a function', system_prompt=None, image_files=None, json_schema=None
+        )
 
     def test_cli_mode_fallback_to_ollama_when_unavailable(self):
         """Test CLI mode falls back to ollama when model not available in llama.cpp."""
@@ -119,7 +121,9 @@ def test_cli_mode_fallback_to_ollama_when_unavailable(self):
 
         assert result == "Ollama response from DeepSeek Coder"
         self.mock_available.assert_called_once_with(TEST_OLLAMA_MODEL)
-        self.mock_chat_ollama.assert_called_once_with(TEST_OLLAMA_MODEL, 'Help with coding', system_prompt=None, image_files=None, json_schema=None)
+        self.mock_chat_ollama.assert_called_once_with(
+            TEST_OLLAMA_MODEL, 'Help with coding', system_prompt=None, image_files=None, json_schema=None
+        )
 
     def test_default_mode_is_cli(self):
         """Test that default mode is CLI when no llama_mode specified."""
@@ -130,7 +134,9 @@ def test_default_mode_is_cli(self):
 
         assert result == "Default CLI mode response"
         self.mock_available.assert_called_once_with(TEST_LLAMACPP_MODEL)
-        self.mock_chat_llamacpp.assert_called_once_with(TEST_LLAMACPP_MODEL, 'Help me', system_prompt=None, image_files=None, json_schema=None)
+        self.mock_chat_llamacpp.assert_called_once_with(
+            TEST_LLAMACPP_MODEL, 'Help me', system_prompt=None, image_files=None, json_schema=None
+        )
 
 
 class TestCLIModeIntegration:
@@ -173,26 +179,18 @@ def test_cli_mode_passes_json_schema_to_ollama(self, tmp_path):
         When json_schema is supplied, chat_with_model should forward the parsed
         schema (as a dict) to chat_with_ollama.
         """
-        test_schema = {"schema": {"type":"object","properties":{"answer":{"type":"string"}}}}
+        test_schema = {"schema": {"type": "object", "properties": {"answer": {"type": "string"}}}}
 
         # Prepare mocks
-        with patch('ai_server.server.is_llamacpp_available', return_value=False), \
-             patch('ai_server.server.chat_with_ollama') as mock_ollama:
+        with patch('ai_server.server.is_llamacpp_available', return_value=False), patch(
+            'ai_server.server.chat_with_ollama'
+        ) as mock_ollama:
             mock_ollama.return_value = "schema-aware response"
 
-            result = chat_with_model(
-                TEST_OLLAMA_MODEL,
-                "Give me an answer",
-                llama_mode='cli',
-                json_schema=test_schema
-            )
+            result = chat_with_model(TEST_OLLAMA_MODEL, "Give me an answer", llama_mode='cli', json_schema=test_schema)
 
         assert result == "schema-aware response"
 
         mock_ollama.assert_called_once_with(
-            TEST_OLLAMA_MODEL,
-            "Give me an answer",
-            system_prompt=None,
-            image_files=None,
-            json_schema=test_schema
+            TEST_OLLAMA_MODEL, "Give me an answer", system_prompt=None, image_files=None, json_schema=test_schema
         )

test/test_core.py

Lines changed: 3 additions & 7 deletions
@@ -83,7 +83,7 @@ def test_chat_with_ollama_success(self, mock_ollama):
             model=TEST_OLLAMA_MODEL,
             messages=[{'role': 'user', 'content': 'Help me write a Python function'}],
             stream=False,
-            format=None
+            format=None,
         )
 
     def test_chat_with_ollama_service_unavailable(self, mock_ollama):
@@ -103,18 +103,14 @@ def test_chat_with_ollama_model_not_found(self, mock_ollama):
     def test_chat_with_ollama_with_json_schema(self, mock_ollama, tmp_path):
         """Ollama chat should forward the JSON schema (format=…) when provided."""
         # Fake schema file
-        test_schema = {"schema": {"type":"object","properties":{"answer":{"type":"string"}}}}
+        test_schema = {"schema": {"type": "object", "properties": {"answer": {"type": "string"}}}}
 
         # Mock ollama response
         mock_response = MagicMock()
         mock_response.message.content = "42"
         mock_ollama.return_value = mock_response
 
-        result = chat_with_ollama(
-            TEST_OLLAMA_MODEL,
-            "What is the meaning of life?",
-            json_schema=test_schema
-        )
+        result = chat_with_ollama(TEST_OLLAMA_MODEL, "What is the meaning of life?", json_schema=test_schema)
 
         assert result == "42"
         mock_ollama.assert_called_once_with(

test/test_server_mode.py

Lines changed: 9 additions & 11 deletions
@@ -111,7 +111,9 @@ def test_server_mode_uses_llamacpp_when_available(self):
 
         assert result == "Server response from DeepSeek V3"
         self.mock_available.assert_called_once_with(TEST_LLAMACPP_MODEL)
-        self.mock_chat_server.assert_called_once_with(TEST_LLAMACPP_MODEL, 'Explain code', system_prompt=None, image_files=None, json_schema=None)
+        self.mock_chat_server.assert_called_once_with(
+            TEST_LLAMACPP_MODEL, 'Explain code', system_prompt=None, image_files=None, json_schema=None
+        )
 
     def test_server_mode_fallback_to_ollama_when_unavailable(self):
         """Test server mode falls back to ollama when model not available in llama.cpp."""
@@ -122,7 +124,9 @@ def test_server_mode_fallback_to_ollama_when_unavailable(self):
 
         assert result == "Ollama fallback response"
         self.mock_available.assert_called_once_with(TEST_OLLAMA_MODEL)
-        self.mock_chat_ollama.assert_called_once_with(TEST_OLLAMA_MODEL, 'Debug code', system_prompt=None, image_files=None, json_schema=None)
+        self.mock_chat_ollama.assert_called_once_with(
+            TEST_OLLAMA_MODEL, 'Debug code', system_prompt=None, image_files=None, json_schema=None
+        )
 
     def test_server_mode_requires_server_url(self):
         """Test server mode requires LLAMA_SERVER_URL to be set."""
@@ -184,21 +188,16 @@ def test_server_mode_passes_json_schema_to_llama_server(self, tmp_path, mock_requests_post):
         chat_with_model (server mode) should forward a json_schema file path
         and llama-server should receive the parsed schema in its JSON body.
         """
-        test_schema = {"schema": {"type":"object","properties":{"answer":{"type":"string"}}}}
+        test_schema = {"schema": {"type": "object", "properties": {"answer": {"type": "string"}}}}
 
         with patch('ai_server.server.is_llamacpp_available', return_value=True):
            mock_response = MagicMock()
            mock_response.status_code = 200
-            mock_response.json.return_value = {
-                "choices": [{"message": {"content": "Schema-aware server reply"}}]
-            }
+            mock_response.json.return_value = {"choices": [{"message": {"content": "Schema-aware server reply"}}]}
             mock_requests_post.return_value = mock_response
 
             result = chat_with_model(
-                TEST_LLAMACPP_MODEL,
-                "Give me an answer",
-                llama_mode="server",
-                json_schema=test_schema
+                TEST_LLAMACPP_MODEL, "Give me an answer", llama_mode="server", json_schema=test_schema
             )
 
         assert result == "Schema-aware server reply"
@@ -213,4 +212,3 @@ def test_server_mode_passes_json_schema_to_llama_server(self, tmp_path, mock_requests_post):
         assert body["model"] == TEST_LLAMACPP_MODEL
         assert body["messages"][0]["content"] == "Give me an answer"
         assert body["json_schema"] == {"type": "object", "properties": {"answer": {"type": "string"}}}
-

test/test_system_prompt.py

Lines changed: 3 additions & 1 deletion
@@ -77,4 +77,6 @@ def test_chat_with_model_routing(self, mock_available, mock_chat):
         mock_chat.return_value = "result"
 
         chat_with_model(TEST_MODEL, TEST_USER_CONTENT, 'cli', TEST_SYSTEM_PROMPT)
-        mock_chat.assert_called_once_with(TEST_MODEL, TEST_USER_CONTENT, system_prompt=TEST_SYSTEM_PROMPT, image_files=None, json_schema=None)
+        mock_chat.assert_called_once_with(
+            TEST_MODEL, TEST_USER_CONTENT, system_prompt=TEST_SYSTEM_PROMPT, image_files=None, json_schema=None
+        )
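
To reproduce fixes like these locally rather than waiting for pre-commit.ci to push a follow-up commit, the usual workflow is pre-commit's own CLI, run against the whole tree (this assumes the hooks are already declared in the repository's .pre-commit-config.yaml):

pip install pre-commit
pre-commit run --all-files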
