
Commit 26d4d2d

Donny Wong (donny-wong) authored
Changed application name to markus_ai_server (#12)
Co-authored-by: Donny Wong <[email protected]>
1 parent 53753a7 commit 26d4d2d

File tree

12 files changed (+56 −49 lines)


.gitignore

Lines changed: 3 additions & 0 deletions
@@ -172,3 +172,6 @@ cython_debug/
 
 # PyPI configuration file
 .pypirc
+
+# Ignore JetBrains IDE configuration folder
+.idea/

README.md

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-# ai-server
+# markus-ai-server
 
 ## Developers
 

pyproject.toml

Lines changed: 4 additions & 4 deletions
@@ -4,8 +4,8 @@ build-backend = "hatchling.build"
 
 
 [project]
-name = "ai_server"
-version = "0.0.1"
+name = "markus_ai_server"
+version = "0.0.2"
 authors = [
     { name="David Liu", email="[email protected]" },
 ]
@@ -33,8 +33,8 @@ dev = [
 ]
 
 [project.urls]
-Homepage = "https://github.com/MarkUsProject/ai-server"
-Issues = "https://github.com/MarkUsProject/ai-server/issues"
+Homepage = "https://github.com/MarkUsProject/markus-ai-server"
+Issues = "https://github.com/MarkUsProject/markus-ai-server/issues"
 
 [tool.black]
 line-length = 120
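
Note on the rename: with the [project] name now markus_ai_server, the import package changes too, so consumers pinned to 0.0.1 must update their imports when moving to 0.0.2. A minimal sketch (chat_with_model is one of the symbols the test diffs below import):

    # Before (0.0.1) -- this now raises ModuleNotFoundError:
    # from ai_server.server import chat_with_model
    # After (0.0.2):
    from markus_ai_server.server import chat_with_model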

test/test_cli_mode.py

Lines changed: 10 additions & 10 deletions
@@ -6,7 +6,7 @@
 
 os.environ.setdefault('REDIS_URL', 'redis://localhost:6379')
 
-from ai_server.server import chat_with_llamacpp, chat_with_model
+from markus_ai_server.server import chat_with_llamacpp, chat_with_model
 
 # Test models
 TEST_LLAMACPP_MODEL = 'DeepSeek-V3-0324-UD-IQ2_XXS'
@@ -16,28 +16,28 @@
 @pytest.fixture
 def mock_subprocess():
     """Mock subprocess.run for CLI tests."""
-    with patch('ai_server.server.subprocess.run') as mock:
+    with patch('markus_ai_server.server.subprocess.run') as mock:
         yield mock
 
 
 @pytest.fixture
 def mock_resolve_model_path():
     """Mock resolve_model_path for CLI tests."""
-    with patch('ai_server.server.resolve_model_path') as mock:
+    with patch('markus_ai_server.server.resolve_model_path') as mock:
         yield mock
 
 
 @pytest.fixture
 def mock_glob():
     """Mock glob.glob for model discovery tests."""
-    with patch('ai_server.server.glob.glob') as mock:
+    with patch('markus_ai_server.server.glob.glob') as mock:
         yield mock
 
 
 @pytest.fixture
 def mock_ollama():
     """Mock ollama.chat for fallback tests."""
-    with patch('ai_server.server.ollama.chat') as mock:
+    with patch('markus_ai_server.server.ollama.chat') as mock:
         yield mock
 
 
@@ -91,9 +91,9 @@ class TestCLIModeRouting:
     @pytest.fixture(autouse=True)
     def setup_routing_mocks(self):
         """Set up common mocks for routing tests."""
-        with patch('ai_server.server.chat_with_llamacpp') as mock_chat_llamacpp, patch(
-            'ai_server.server.is_llamacpp_available'
-        ) as mock_available, patch('ai_server.server.chat_with_ollama') as mock_chat_ollama:
+        with patch('markus_ai_server.server.chat_with_llamacpp') as mock_chat_llamacpp, patch(
+            'markus_ai_server.server.is_llamacpp_available'
+        ) as mock_available, patch('markus_ai_server.server.chat_with_ollama') as mock_chat_ollama:
             self.mock_chat_llamacpp = mock_chat_llamacpp
             self.mock_available = mock_available
             self.mock_chat_ollama = mock_chat_ollama
@@ -215,8 +215,8 @@ def test_cli_mode_passes_json_schema_to_ollama(self, tmp_path):
         test_schema = {"schema": {"type": "object", "properties": {"answer": {"type": "string"}}}}
 
         # Prepare mocks
-        with patch('ai_server.server.is_llamacpp_available', return_value=False), patch(
-            'ai_server.server.chat_with_ollama'
+        with patch('markus_ai_server.server.is_llamacpp_available', return_value=False), patch(
+            'markus_ai_server.server.chat_with_ollama'
         ) as mock_ollama:
            mock_ollama.return_value = "schema-aware response"
 
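
All of these edits follow from how unittest.mock.patch works: the target string must name the attribute where it is looked up, which after the rename is the markus_ai_server.server module rather than the underlying subprocess, glob, or ollama packages. A minimal sketch of the pattern, with a stubbed return shape that is an assumption, not taken from this diff:

    from unittest.mock import patch

    # Patch ollama.chat at the location markus_ai_server.server reads it from.
    with patch('markus_ai_server.server.ollama.chat') as mock_chat:
        mock_chat.return_value = {'message': {'content': 'stub reply'}}  # assumed shape
        # ... call the code under test here; it sees mock_chat instead of ollama.chat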

test/test_core.py

Lines changed: 9 additions & 5 deletions
@@ -5,7 +5,11 @@
 
 os.environ.setdefault('REDIS_URL', 'redis://localhost:6379')
 
-from ai_server.server import chat_with_ollama, is_llamacpp_available, resolve_model_path
+from markus_ai_server.server import (
+    chat_with_ollama,
+    is_llamacpp_available,
+    resolve_model_path,
+)
 
 # Test models
 TEST_LLAMACPP_MODEL = 'DeepSeek-V3-0324-UD-IQ2_XXS'
@@ -15,14 +19,14 @@
 @pytest.fixture
 def mock_glob():
     """Mock glob.glob for model discovery tests."""
-    with patch('ai_server.server.glob.glob') as mock:
+    with patch('markus_ai_server.server.glob.glob') as mock:
         yield mock
 
 
 @pytest.fixture
 def mock_ollama():
     """Mock ollama.chat for ollama tests."""
-    with patch('ai_server.server.ollama.chat') as mock:
+    with patch('markus_ai_server.server.ollama.chat') as mock:
         yield mock
 
 
@@ -49,7 +53,7 @@ def test_resolve_model_path_not_found(self, mock_glob):
 
     def test_is_llamacpp_available_true(self):
         """Test model availability check when model exists."""
-        with patch('ai_server.server.resolve_model_path') as mock_resolve:
+        with patch('markus_ai_server.server.resolve_model_path') as mock_resolve:
             mock_resolve.return_value = f'/data1/GGUF/{TEST_LLAMACPP_MODEL}/{TEST_LLAMACPP_MODEL}.gguf'
 
             result = is_llamacpp_available(TEST_LLAMACPP_MODEL)
@@ -59,7 +63,7 @@ def test_is_llamacpp_available_true(self):
 
     def test_is_llamacpp_available_false(self):
         """Test model availability check when model doesn't exist."""
-        with patch('ai_server.server.resolve_model_path') as mock_resolve:
+        with patch('markus_ai_server.server.resolve_model_path') as mock_resolve:
             mock_resolve.return_value = None
 
             result = is_llamacpp_available('nonexistent-model')
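
The mocks in this file pin down a small contract: resolve_model_path locates a GGUF file via glob.glob and returns a path like /data1/GGUF/<model>/<model>.gguf (or None), and is_llamacpp_available is true exactly when a path resolves. A sketch consistent with these tests, not the project's actual implementation; the glob pattern is an assumption:

    import glob

    def resolve_model_path(model_name):
        # Assumed search pattern; the tests only fix the layout
        # /data1/GGUF/<model>/<model>.gguf through mocked return values.
        matches = glob.glob(f'/data1/GGUF/{model_name}/*.gguf')
        return matches[0] if matches else None

    def is_llamacpp_available(model_name):
        return resolve_model_path(model_name) is not None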

test/test_server_mode.py

Lines changed: 12 additions & 12 deletions
@@ -5,7 +5,7 @@
 
 os.environ.setdefault('REDIS_URL', 'redis://localhost:6379')
 
-from ai_server.server import chat_with_llama_server_http, chat_with_model
+from markus_ai_server.server import chat_with_llama_server_http, chat_with_model
 
 # Test models
 TEST_LLAMACPP_MODEL = 'DeepSeek-V3-0324-UD-IQ2_XXS'
@@ -15,28 +15,28 @@
 @pytest.fixture
 def mock_requests_post():
     """Mock requests.post for HTTP tests."""
-    with patch('ai_server.server.requests.post') as mock:
+    with patch('markus_ai_server.server.requests.post') as mock:
         yield mock
 
 
 @pytest.fixture
 def mock_llama_server_url():
     """Mock LLAMA_SERVER_URL for server tests."""
-    with patch('ai_server.server.LLAMA_SERVER_URL', 'http://localhost:8080'):
+    with patch('markus_ai_server.server.LLAMA_SERVER_URL', 'http://localhost:8080'):
         yield
 
 
 @pytest.fixture
 def mock_glob():
     """Mock glob.glob for model discovery tests."""
-    with patch('ai_server.server.glob.glob') as mock:
+    with patch('markus_ai_server.server.glob.glob') as mock:
         yield mock
 
 
 @pytest.fixture
 def mock_ollama():
     """Mock ollama.chat for fallback tests."""
-    with patch('ai_server.server.ollama.chat') as mock:
+    with patch('markus_ai_server.server.ollama.chat') as mock:
         yield mock
 
 
@@ -62,7 +62,7 @@ def test_chat_with_llama_server_http_success(self, mock_requests_post, mock_llam
 
     def test_chat_with_llama_server_http_no_url(self):
         """Test HTTP chat when LLAMA_SERVER_URL is not set."""
-        with patch('ai_server.server.LLAMA_SERVER_URL', None):
+        with patch('markus_ai_server.server.LLAMA_SERVER_URL', None):
             with pytest.raises(Exception, match="LLAMA_SERVER_URL environment variable not set"):
                 chat_with_llama_server_http(TEST_LLAMACPP_MODEL, 'Hello')
 
@@ -92,10 +92,10 @@ class TestServerModeRouting:
     @pytest.fixture(autouse=True)
     def setup_routing_mocks(self):
         """Set up common mocks for routing tests."""
-        with patch('ai_server.server.chat_with_llama_server_http') as mock_chat_server, patch(
-            'ai_server.server.is_llamacpp_available'
-        ) as mock_available, patch('ai_server.server.chat_with_ollama') as mock_chat_ollama, patch(
-            'ai_server.server.LLAMA_SERVER_URL', 'http://localhost:8080'
+        with patch('markus_ai_server.server.chat_with_llama_server_http') as mock_chat_server, patch(
+            'markus_ai_server.server.is_llamacpp_available'
+        ) as mock_available, patch('markus_ai_server.server.chat_with_ollama') as mock_chat_ollama, patch(
+            'markus_ai_server.server.LLAMA_SERVER_URL', 'http://localhost:8080'
         ):
             self.mock_chat_server = mock_chat_server
             self.mock_available = mock_available
@@ -158,7 +158,7 @@ def test_server_mode_fallback_to_ollama_with_model_options(self):
 
     def test_server_mode_requires_server_url(self):
         """Test server mode requires LLAMA_SERVER_URL to be set."""
-        with patch('ai_server.server.LLAMA_SERVER_URL', None):
+        with patch('markus_ai_server.server.LLAMA_SERVER_URL', None):
             self.mock_available.return_value = True
 
             with pytest.raises(Exception, match="LLAMA_SERVER_URL environment variable not set"):
@@ -218,7 +218,7 @@ def test_server_mode_passes_json_schema_to_llama_server(self, tmp_path, mock_req
         """
         test_schema = {"schema": {"type": "object", "properties": {"answer": {"type": "string"}}}}
 
-        with patch('ai_server.server.is_llamacpp_available', return_value=True):
+        with patch('markus_ai_server.server.is_llamacpp_available', return_value=True):
             mock_response = MagicMock()
             mock_response.status_code = 200
             mock_response.json.return_value = {"choices": [{"message": {"content": "Schema-aware server reply"}}]}
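
Taken together, the server-mode tests outline the HTTP path: chat_with_llama_server_http requires LLAMA_SERVER_URL, posts with requests.post, and reads an OpenAI-style {"choices": [{"message": {"content": ...}}]} body. A sketch of that behaviour under those constraints; the endpoint path and payload fields are assumptions:

    import os
    import requests

    LLAMA_SERVER_URL = os.environ.get('LLAMA_SERVER_URL')

    def chat_with_llama_server_http(model, prompt):
        # The tests above pin down this error message and the response shape.
        if not LLAMA_SERVER_URL:
            raise Exception('LLAMA_SERVER_URL environment variable not set')
        response = requests.post(
            f'{LLAMA_SERVER_URL}/v1/chat/completions',  # assumed endpoint
            json={'model': model, 'messages': [{'role': 'user', 'content': prompt}]},
        )
        response.raise_for_status()
        return response.json()['choices'][0]['message']['content']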
