Commit edd109b

Modified paths to be OS-independent
1 parent fc31877 commit edd109b

File tree

CHANGELOG.md
src/markus_ai_server/server.py
test/test_cli_mode.py
test/test_core.py
test/test_server_mode.py

5 files changed: +34 -23 lines changed

CHANGELOG.md

Lines changed: 1 addition & 0 deletions
@@ -3,3 +3,4 @@
 ## [unreleased]
 
 - Modified project structure and configuration to use uv
+- Modified paths to be OS-independent

src/markus_ai_server/server.py

Lines changed: 3 additions & 2 deletions
@@ -6,6 +6,7 @@
 import logging
 import os
 import subprocess
+from pathlib import Path
 from typing import Optional
 
 import ollama
@@ -28,8 +29,8 @@
 DEFAULT_MODEL = os.getenv('DEFAULT_MODEL', 'deepseek-coder-v2:latest')
 
 # Llama.cpp configuration
-LLAMA_CPP_CLI = os.getenv('LLAMA_CPP_CLI', '/data1/llama.cpp/bin/llama-cli')
-GGUF_DIR = os.getenv('GGUF_DIR', '/data1/GGUF')
+LLAMA_CPP_CLI = os.getenv('LLAMA_CPP_CLI', str(Path('/data1/llama.cpp/bin/llama-cli')))
+GGUF_DIR = os.getenv('GGUF_DIR', str(Path('/data1/GGUF')))
 
 # Llama server configuration
 _llama_server_url = os.getenv('LLAMA_SERVER_URL')  # e.g., http://localhost:8080 or localhost:8080
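
Note: wrapping the hard-coded defaults in str(Path(...)) renders them with the host OS's native separator, so comparisons against paths built elsewhere with pathlib still match on Windows. A minimal sketch of the effect, using the pure path flavours so the difference is visible on any machine (the value is illustrative):

    from pathlib import PurePosixPath, PureWindowsPath

    # Path() picks the flavour of the current OS; the pure variants
    # make both renderings visible side by side.
    print(str(PurePosixPath('/data1/GGUF')))    # /data1/GGUF
    print(str(PureWindowsPath('/data1/GGUF')))  # \data1\GGUF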

test/test_cli_mode.py

Lines changed: 17 additions & 11 deletions
@@ -1,5 +1,6 @@
 import os
 import subprocess
+from pathlib import Path
 from unittest.mock import MagicMock, patch
 
 import pytest
@@ -46,7 +47,7 @@ class TestLlamaCppCLI:
 
     def test_chat_with_llamacpp_success(self, mock_resolve_model_path, mock_subprocess):
         """Test successful CLI chat with llama.cpp."""
-        model_path = f'/data1/GGUF/{TEST_LLAMACPP_MODEL}/{TEST_LLAMACPP_MODEL}.gguf'
+        model_path = str(Path(f'/data1/GGUF/{TEST_LLAMACPP_MODEL}/{TEST_LLAMACPP_MODEL}.gguf'))
         mock_resolve_model_path.return_value = model_path
 
         mock_result = MagicMock()
@@ -61,7 +62,7 @@ def test_chat_with_llamacpp_success(self, mock_resolve_model_path, mock_subprocess):
         # Verify correct CLI command structure
         args, kwargs = mock_subprocess.call_args
         cmd = args[0]
-        assert '/data1/llama.cpp/bin/llama-cli' in cmd
+        assert str(Path('/data1/llama.cpp/bin/llama-cli')) in cmd
         assert '-m' in cmd and model_path in cmd
         assert '--n-gpu-layers' in cmd and '40' in cmd
         assert '--single-turn' in cmd
@@ -75,7 +76,9 @@ def test_chat_with_llamacpp_model_not_found(self, mock_resolve_model_path):
 
     def test_chat_with_llamacpp_subprocess_error(self, mock_resolve_model_path, mock_subprocess):
         """Test CLI chat when subprocess fails."""
-        mock_resolve_model_path.return_value = f'/data1/GGUF/{TEST_LLAMACPP_MODEL}/{TEST_LLAMACPP_MODEL}.gguf'
+        mock_resolve_model_path.return_value = str(
+            Path(f'/data1/GGUF/{TEST_LLAMACPP_MODEL}/{TEST_LLAMACPP_MODEL}.gguf')
+        )
 
         error = subprocess.CalledProcessError(1, 'cmd')
         error.stderr = b'CUDA out of memory'
@@ -91,9 +94,11 @@ class TestCLIModeRouting:
     @pytest.fixture(autouse=True)
     def setup_routing_mocks(self):
         """Set up common mocks for routing tests."""
-        with patch('markus_ai_server.server.chat_with_llamacpp') as mock_chat_llamacpp, patch(
-            'markus_ai_server.server.is_llamacpp_available'
-        ) as mock_available, patch('markus_ai_server.server.chat_with_ollama') as mock_chat_ollama:
+        with (
+            patch('markus_ai_server.server.chat_with_llamacpp') as mock_chat_llamacpp,
+            patch('markus_ai_server.server.is_llamacpp_available') as mock_available,
+            patch('markus_ai_server.server.chat_with_ollama') as mock_chat_ollama,
+        ):
             self.mock_chat_llamacpp = mock_chat_llamacpp
             self.mock_available = mock_available
             self.mock_chat_ollama = mock_chat_ollama
@@ -177,7 +182,7 @@ class TestCLIModeIntegration:
 
     def test_complete_cli_flow_with_real_model(self, mock_glob, mock_subprocess):
         """Test complete CLI flow: model resolution → CLI execution."""
-        model_path = f'/data1/GGUF/{TEST_LLAMACPP_MODEL}/{TEST_LLAMACPP_MODEL}.gguf'
+        model_path = str(Path(f'/data1/GGUF/{TEST_LLAMACPP_MODEL}/{TEST_LLAMACPP_MODEL}.gguf'))
 
         mock_glob.return_value = [model_path]
         mock_result = MagicMock()
@@ -204,7 +209,7 @@ def test_complete_cli_fallback_flow_to_ollama(self, mock_glob, mock_ollama):
         result = chat_with_model(TEST_OLLAMA_MODEL, 'Fallback test', llama_mode='cli')
 
         assert result == "Ollama CLI fallback integration test successful!"
-        mock_glob.assert_called_once_with(f'/data1/GGUF/{TEST_OLLAMA_MODEL}/*.gguf')
+        mock_glob.assert_called_once_with(str(Path(f'/data1/GGUF/{TEST_OLLAMA_MODEL}/*.gguf')))
         mock_ollama.assert_called_once()
 
     def test_cli_mode_passes_json_schema_to_ollama(self, tmp_path):
@@ -215,9 +220,10 @@ def test_cli_mode_passes_json_schema_to_ollama(self, tmp_path):
         test_schema = {"schema": {"type": "object", "properties": {"answer": {"type": "string"}}}}
 
         # Prepare mocks
-        with patch('markus_ai_server.server.is_llamacpp_available', return_value=False), patch(
-            'markus_ai_server.server.chat_with_ollama'
-        ) as mock_ollama:
+        with (
+            patch('markus_ai_server.server.is_llamacpp_available', return_value=False),
+            patch('markus_ai_server.server.chat_with_ollama') as mock_ollama,
+        ):
             mock_ollama.return_value = "schema-aware response"
 
             result = chat_with_model(TEST_OLLAMA_MODEL, "Give me an answer", llama_mode='cli', json_schema=test_schema)
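
Note: the stacked mocks are regrouped into the parenthesized form of the with statement, which requires Python 3.10 or newer. A standalone sketch of the construct (the patch targets here are illustrative, not from this project):

    import os
    from unittest.mock import patch

    # Each context manager sits on its own line with a trailing comma,
    # instead of being chained across awkwardly wrapped patch() calls.
    with (
        patch('os.getcwd', return_value='/tmp'),
        patch('os.getpid', return_value=1234) as fake_getpid,
    ):
        assert os.getcwd() == '/tmp'
        assert os.getpid() == 1234
        assert fake_getpid.call_count == 1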

test/test_core.py

Lines changed: 4 additions & 3 deletions
@@ -1,4 +1,5 @@
 import os
+from pathlib import Path
 from unittest.mock import MagicMock, patch
 
 import pytest
@@ -35,13 +36,13 @@ class TestModelResolution:
 
     def test_resolve_model_path_found(self, mock_glob):
        """Test model path resolution when model exists."""
-        model_path = f'/data1/GGUF/{TEST_LLAMACPP_MODEL}/{TEST_LLAMACPP_MODEL}.gguf'
+        model_path = str(Path(f'/data1/GGUF/{TEST_LLAMACPP_MODEL}/{TEST_LLAMACPP_MODEL}.gguf'))
         mock_glob.return_value = [model_path]
 
         result = resolve_model_path(TEST_LLAMACPP_MODEL)
 
         assert result == model_path
-        mock_glob.assert_called_once_with(f'/data1/GGUF/{TEST_LLAMACPP_MODEL}/*.gguf')
+        mock_glob.assert_called_once_with(str(Path(f'/data1/GGUF/{TEST_LLAMACPP_MODEL}/*.gguf')))
 
     def test_resolve_model_path_not_found(self, mock_glob):
         """Test model path resolution when model doesn't exist."""
@@ -54,7 +55,7 @@ def test_resolve_model_path_not_found(self, mock_glob):
     def test_is_llamacpp_available_true(self):
         """Test model availability check when model exists."""
         with patch('markus_ai_server.server.resolve_model_path') as mock_resolve:
-            mock_resolve.return_value = f'/data1/GGUF/{TEST_LLAMACPP_MODEL}/{TEST_LLAMACPP_MODEL}.gguf'
+            mock_resolve.return_value = str(Path(f'/data1/GGUF/{TEST_LLAMACPP_MODEL}/{TEST_LLAMACPP_MODEL}.gguf'))
 
             result = is_llamacpp_available(TEST_LLAMACPP_MODEL)
 
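
Note: the expected glob pattern is now also built through Path, so it matches whatever resolve_model_path produces on the host OS. The resolver itself is not part of this diff; a minimal sketch of the shape these assertions imply (only the names come from the tests, the body is an assumption):

    import glob
    from pathlib import Path
    from typing import Optional

    GGUF_DIR = str(Path('/data1/GGUF'))  # same default as in server.py above

    def resolve_model_path(model_name: str) -> Optional[str]:
        # Build the pattern with Path so separators are native, then glob it;
        # the tests assert glob.glob was called with exactly this string.
        pattern = str(Path(GGUF_DIR) / model_name / '*.gguf')
        matches = glob.glob(pattern)
        return matches[0] if matches else None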

test/test_server_mode.py

Lines changed: 9 additions & 7 deletions
@@ -1,4 +1,5 @@
 import os
+from pathlib import Path
 from unittest.mock import MagicMock, patch
 
 import pytest
@@ -92,10 +93,11 @@ class TestServerModeRouting:
     @pytest.fixture(autouse=True)
     def setup_routing_mocks(self):
         """Set up common mocks for routing tests."""
-        with patch('markus_ai_server.server.chat_with_llama_server_http') as mock_chat_server, patch(
-            'markus_ai_server.server.is_llamacpp_available'
-        ) as mock_available, patch('markus_ai_server.server.chat_with_ollama') as mock_chat_ollama, patch(
-            'markus_ai_server.server.LLAMA_SERVER_URL', 'http://localhost:8080'
+        with (
+            patch('markus_ai_server.server.chat_with_llama_server_http') as mock_chat_server,
+            patch('markus_ai_server.server.is_llamacpp_available') as mock_available,
+            patch('markus_ai_server.server.chat_with_ollama') as mock_chat_ollama,
+            patch('markus_ai_server.server.LLAMA_SERVER_URL', 'http://localhost:8080'),
         ):
             self.mock_chat_server = mock_chat_server
             self.mock_available = mock_available
@@ -177,7 +179,7 @@ class TestServerModeIntegration:
 
     def test_complete_server_flow_with_real_model(self, mock_glob, mock_requests_post, mock_llama_server_url):
         """Test complete server flow: model resolution → HTTP API call."""
-        model_path = f'/data1/GGUF/{TEST_LLAMACPP_MODEL}/{TEST_LLAMACPP_MODEL}.gguf'
+        model_path = str(Path(f'/data1/GGUF/{TEST_LLAMACPP_MODEL}/{TEST_LLAMACPP_MODEL}.gguf'))
 
         # Mock model found (only checked once for availability in server mode)
         mock_glob.return_value = [model_path]
@@ -192,7 +194,7 @@ def test_complete_server_flow_with_real_model(self, mock_glob, mock_requests_post, mock_llama_server_url):
 
         assert result == "Server integration test successful!"
         # In server mode, glob.glob only called once for is_llamacpp_available
-        mock_glob.assert_called_once_with(f'/data1/GGUF/{TEST_LLAMACPP_MODEL}/*.gguf')
+        mock_glob.assert_called_once_with(str(Path(f'/data1/GGUF/{TEST_LLAMACPP_MODEL}/*.gguf')))
         mock_requests_post.assert_called_once()
 
     def test_complete_server_fallback_flow_to_ollama(self, mock_glob, mock_ollama, mock_llama_server_url):
@@ -208,7 +210,7 @@ def test_complete_server_fallback_flow_to_ollama(self, mock_glob, mock_ollama, mock_llama_server_url):
         result = chat_with_model(TEST_OLLAMA_MODEL, 'Fallback test', llama_mode='server')
 
         assert result == "Ollama server fallback integration test successful!"
-        mock_glob.assert_called_once_with(f'/data1/GGUF/{TEST_OLLAMA_MODEL}/*.gguf')
+        mock_glob.assert_called_once_with(str(Path(f'/data1/GGUF/{TEST_OLLAMA_MODEL}/*.gguf')))
         mock_ollama.assert_called_once()
 
     def test_server_mode_passes_json_schema_to_llama_server(self, tmp_path, mock_requests_post, mock_llama_server_url):
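
Note: LLAMA_SERVER_URL stays a plain string throughout the change. It is a URL rather than a filesystem path, and presumably for that reason it is not wrapped in Path; doing so would mangle it on Windows, as this small sketch suggests:

    from pathlib import PureWindowsPath

    # Pushing a URL through a Windows path flavour collapses the '//' after
    # the scheme and flips the remaining slashes to backslashes, leaving
    # something that is no longer a usable URL.
    print(str(PureWindowsPath('http://localhost:8080/v1/chat')))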
