@@ -108,7 +108,9 @@ def test_cli_mode_uses_llamacpp_when_available(self):
 
         assert result == "CLI response from DeepSeek V3"
         self.mock_available.assert_called_once_with(TEST_LLAMACPP_MODEL)
-        self.mock_chat_llamacpp.assert_called_once_with(TEST_LLAMACPP_MODEL, 'Write a function', system_prompt=None, image_files=None, json_schema=None)
+        self.mock_chat_llamacpp.assert_called_once_with(
+            TEST_LLAMACPP_MODEL, 'Write a function', system_prompt=None, image_files=None, json_schema=None
+        )
 
     def test_cli_mode_fallback_to_ollama_when_unavailable(self):
         """Test CLI mode falls back to ollama when model not available in llama.cpp."""
@@ -119,7 +121,9 @@ def test_cli_mode_fallback_to_ollama_when_unavailable(self):
 
         assert result == "Ollama response from DeepSeek Coder"
         self.mock_available.assert_called_once_with(TEST_OLLAMA_MODEL)
-        self.mock_chat_ollama.assert_called_once_with(TEST_OLLAMA_MODEL, 'Help with coding', system_prompt=None, image_files=None, json_schema=None)
+        self.mock_chat_ollama.assert_called_once_with(
+            TEST_OLLAMA_MODEL, 'Help with coding', system_prompt=None, image_files=None, json_schema=None
+        )
 
     def test_default_mode_is_cli(self):
         """Test that default mode is CLI when no llama_mode specified."""
@@ -130,7 +134,9 @@ def test_default_mode_is_cli(self):
 
         assert result == "Default CLI mode response"
         self.mock_available.assert_called_once_with(TEST_LLAMACPP_MODEL)
-        self.mock_chat_llamacpp.assert_called_once_with(TEST_LLAMACPP_MODEL, 'Help me', system_prompt=None, image_files=None, json_schema=None)
+        self.mock_chat_llamacpp.assert_called_once_with(
+            TEST_LLAMACPP_MODEL, 'Help me', system_prompt=None, image_files=None, json_schema=None
+        )
 
 
 class TestCLIModeIntegration:
@@ -173,26 +179,18 @@ def test_cli_mode_passes_json_schema_to_ollama(self, tmp_path):
         When json_schema is supplied, chat_with_model should forward the parsed
         schema (as a dict) to chat_with_ollama.
         """
-        test_schema = {"schema": {"type":"object","properties":{"answer":{"type":"string"}}}}
+        test_schema = {"schema": {"type": "object", "properties": {"answer": {"type": "string"}}}}
 
         # Prepare mocks
-        with patch('ai_server.server.is_llamacpp_available', return_value=False), \
-             patch('ai_server.server.chat_with_ollama') as mock_ollama:
+        with patch('ai_server.server.is_llamacpp_available', return_value=False), patch(
+            'ai_server.server.chat_with_ollama'
+        ) as mock_ollama:
             mock_ollama.return_value = "schema-aware response"
 
-            result = chat_with_model(
-                TEST_OLLAMA_MODEL,
-                "Give me an answer",
-                llama_mode='cli',
-                json_schema=test_schema
-            )
+            result = chat_with_model(TEST_OLLAMA_MODEL, "Give me an answer", llama_mode='cli', json_schema=test_schema)
 
             assert result == "schema-aware response"
 
             mock_ollama.assert_called_once_with(
-                TEST_OLLAMA_MODEL,
-                "Give me an answer",
-                system_prompt=None,
-                image_files=None,
-                json_schema=test_schema
+                TEST_OLLAMA_MODEL, "Give me an answer", system_prompt=None, image_files=None, json_schema=test_schema
             )
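
For context, the behavior these tests pin down: in CLI mode (the default), chat_with_model first asks is_llamacpp_available(model) and dispatches to chat_with_llamacpp, falling back to chat_with_ollama when the model is not available, forwarding system_prompt, image_files, and json_schema unchanged in either case. A minimal sketch of that routing, assuming helpers of these names live in ai_server.server (hypothetical; the real implementation may differ):

    # Hypothetical sketch; it mirrors only what the tests above assert,
    # not the actual ai_server.server implementation.
    def chat_with_model(model, prompt, llama_mode='cli', system_prompt=None,
                        image_files=None, json_schema=None):
        # CLI mode prefers llama.cpp when the model is available there...
        if llama_mode == 'cli' and is_llamacpp_available(model):
            return chat_with_llamacpp(model, prompt, system_prompt=system_prompt,
                                      image_files=image_files, json_schema=json_schema)
        # ...and otherwise falls back to ollama with identical keyword arguments,
        # which is why the assertions pass the same kwargs to both mocks.
        return chat_with_ollama(model, prompt, system_prompt=system_prompt,
                                image_files=image_files, json_schema=json_schema)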