
Commit fc31877

Updated project to use uv
1 parent 450e9c1 commit fc31877

9 files changed (+1111, -39 lines)

.github/workflows/test.yml

Lines changed: 11 additions & 17 deletions
@@ -15,23 +15,17 @@ jobs:
         python-version: ["3.13"]
 
     steps:
-      - uses: actions/[email protected]
-      - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/[email protected]
+      - name: Check out repository
+        uses: actions/checkout@v6
+
+      - name: Install uv
+        uses: astral-sh/setup-uv@v7
         with:
           python-version: ${{ matrix.python-version }}
-      - name: Cache pip
-        uses: actions/[email protected]
-        with:
-          path: ~/.cache/pip
-          key: ${{ runner.os }}-pip-${{ hashFiles('pyproject.toml') }}
-          restore-keys: |
-            ${{ runner.os }}-pip-
-            ${{ runner.os }}-
-      - name: Install dependencies
-        run: |
-          python -m pip install --upgrade pip setuptools wheel
-          pip install -e .[dev]
+          enable-cache: true
+
+      - name: Sync dependencies
+        run: uv sync --locked --all-extras --dev
+
       - name: Run tests
-        run: |
-          pytest -vv
+        run: uv run pytest -vv
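
The new workflow drops the explicit pip cache and install steps: astral-sh/setup-uv provisions uv (with enable-cache: true caching uv's downloads between runs), and a single uv sync installs everything from the lockfile. Because uv also reads the pinned interpreter from .python-version, the same steps reproduce locally; a minimal sketch, assuming uv is already installed and the commands are run from the repository root:

    # Create .venv from uv.lock, including all extras and the dev group
    uv sync --locked --all-extras --dev

    # Run the test suite inside the project environment
    uv run pytest -vv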

.python-version

Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
+3.13

CHANGELOG.md

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+# Changelog
+
+## [unreleased]
+
+- Modified project structure and configuration to use uv

pyproject.toml

Lines changed: 13 additions & 8 deletions
@@ -1,17 +1,17 @@
-[build-system]
-requires = ["hatchling >= 1.26"]
-build-backend = "hatchling.build"
-
-
 [project]
 name = "markus_ai_server"
 version = "0.0.2"
 authors = [
+    { name="Rolland He" },
+    { name="Will Kukkamalla" },
+    { name="Donny Wong" },
+]
+maintainers = [
     { name="David Liu", email="[email protected]" },
 ]
-description = "A small server to control access to Ollama"
+description = "A small server to control access to local models for MarkUs autograding"
 readme = "README.md"
-requires-python = ">=3.8"
+requires-python = ">=3.9"
 classifiers = [
     "Programming Language :: Python :: 3",
     "Operating System :: OS Independent",

@@ -26,7 +26,8 @@ dependencies = [
     "requests",
 ]
 
-[project.optional-dependencies]
+
+[dependency-groups]
 dev = [
     "pre-commit",
     "pytest",

@@ -36,6 +37,10 @@ dev = [
 Homepage = "https://github.com/MarkUsProject/markus-ai-server"
 Issues = "https://github.com/MarkUsProject/markus-ai-server/issues"
 
+[build-system]
+requires = ["uv_build >= 0.9.17, <0.10.0"]
+build-backend = "uv_build"
+
 [tool.black]
 line-length = 120
 skip-string-normalization = true
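
Two structural changes stand out here: the dev tools move from [project.optional-dependencies] to the PEP 735 [dependency-groups] table, so they are no longer published as an installable extra of the package, and the build backend switches from hatchling to uv_build. In day-to-day use this mainly changes the dev-setup command; a rough before/after sketch, assuming a local checkout of the repository:

    # Before: dev tools were an optional extra
    pip install -e .[dev]

    # After: dev is a dependency group, which uv sync installs by default
    uv sync
    uv sync --no-dev   # production-style install without the dev group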
Lines changed: 5 additions & 14 deletions
@@ -78,9 +78,7 @@ def chat_with_llama_server_http(
     if 'stream' not in payload:
         payload['stream'] = False
 
-    start_log_data = {
-        'model': model
-    }
+    start_log_data = {'model': model}
     logger.info(f'chat_with_llama_server_http starting: {start_log_data}')
     response = requests.post(
         f'{LLAMA_SERVER_URL}/v1/chat/completions',

@@ -89,10 +87,7 @@ def chat_with_llama_server_http(
         timeout=timeout,
     )
 
-    done_log_data = {
-        'model': model,
-        'response_status_code': response.status_code
-    }
+    done_log_data = {'model': model, 'response_status_code': response.status_code}
     logger.info(f'chat_with_llama_server_http done: {start_log_data}')
     if response.status_code == 200:
         data = response.json()

@@ -132,9 +127,7 @@ def chat_with_ollama(
     """Handle chat using ollama."""
     messages = _build_messages(content, system_prompt, image_files)
 
-    start_log_data = {
-        'model': model
-    }
+    start_log_data = {'model': model}
     logger.info(f'chat_with_ollama starting: {start_log_data}')
     response = ollama.chat(
         model=model,

@@ -148,7 +141,7 @@ def chat_with_ollama(
         'eval_duration': response.eval_duration,
         'prompt_eval_duration': response.prompt_eval_duration,
         'eval_count': response.eval_count,
-        'prompt_eval_count': response.prompt_eval_count
+        'prompt_eval_count': response.prompt_eval_count,
     }
 
     logger.info(f'chat_with_ollama done: {done_log_data}')

@@ -187,9 +180,7 @@ def chat_with_llamacpp(
         pass # TODO: pass image files
 
     try:
-        start_log_data = {
-            'model': model
-        }
+        start_log_data = {'model': model}
         logger.info(f'chat_with_llamacpp starting: {start_log_data}')
         result = subprocess.run(cmd, capture_output=True, text=False, timeout=timeout, check=True)
         logger.info(f'chat_with_llamacpp done: {start_log_data}')
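
The changes to the chat functions are formatting-only and match Black's behavior under the [tool.black] settings above: a dict literal with no trailing comma that fits within the 120-character line length is collapsed onto one line, while a trailing comma after the last item (Black's "magic trailing comma") keeps a literal expanded, as with done_log_data in chat_with_ollama. (The done-side logger.info calls in chat_with_llama_server_http and chat_with_llamacpp still interpolate start_log_data rather than the done-state data; that quirk predates this commit and is untouched by it.) A minimal self-contained illustration with hypothetical values, not code from the module:

    model = 'llama3'  # hypothetical stand-in for the function argument

    # No trailing comma and the literal fits in line-length,
    # so Black collapses it onto one line:
    start_log_data = {'model': model}

    # A trailing comma after the last item keeps the literal expanded,
    # one item per line:
    done_log_data = {
        'model': model,
        'response_status_code': 200,
    }

    print(start_log_data, done_log_data)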

uv.lock

Lines changed: 1076 additions & 0 deletions
Some generated files are not rendered by default.
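
uv.lock is uv's generated, cross-platform lockfile: it pins the full resolved dependency set for the project, hence the 1076 added lines. The --locked flag in the CI sync step makes uv fail rather than silently re-resolve when the lockfile is stale, so dependency edits require a re-lock; a minimal sketch of that loop, assuming uv is installed:

    # After editing dependencies in pyproject.toml, refresh the lockfile
    uv lock

    # Verify: errors out if uv.lock no longer matches pyproject.toml
    uv sync --locked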
