Skip to content

Commit 1be7bff

Browse files
committed
Use docker image to run CI tests
1 parent 5d3575e commit 1be7bff

File tree

2 files changed

+147
-7
lines changed

2 files changed

+147
-7
lines changed
Lines changed: 85 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,85 @@
name: Run Daemon Docker Container
description: Pulls and runs LM Studio Daemon Docker image for testing

inputs:
  docker-image:
    description: "Full Docker image name"
    required: true
  container-name:
    description: "Name for the container"
    required: false
    default: "llmster-test"
  port:
    description: "Port to expose (host:container)"
    required: false
    default: "1234:1234"
  # Previously referenced in the run step but never declared, so it always
  # evaluated to "" and the local-image branch was unreachable.
  use-local-image:
    description: "If \"true\", skip `docker pull` and use a locally built image"
    required: false
    default: "false"

outputs:
  container-id:
    description: "The ID of the running container"
    value: ${{ steps.run-container.outputs.container-id }}
  container-name:
    description: "The name of the running container"
    value: ${{ inputs.container-name }}

runs:
  using: "composite"
  steps:
    - name: Pull Docker image
      # A locally built image cannot be pulled from a registry, so the pull
      # is skipped when the caller opts in to a local image.
      if: inputs.use-local-image != 'true'
      shell: bash
      run: |
        echo "Pulling image: ${{ inputs.docker-image }}"
        docker pull ${{ inputs.docker-image }}

    - name: Run container
      id: run-container
      shell: bash
      run: |
        echo "Starting container: ${{ inputs.container-name }}"
        if [ "${{ inputs.use-local-image }}" = "true" ]; then
          echo "Using local image: ${{ inputs.docker-image }}"
        else
          echo "Using registry image: ${{ inputs.docker-image }}"
        fi
        CONTAINER_ID=$(docker run -d --name ${{ inputs.container-name }} -p ${{ inputs.port }} ${{ inputs.docker-image }})
        echo "Container ID: $CONTAINER_ID"
        echo "container-id=$CONTAINER_ID" >> $GITHUB_OUTPUT

        # Wait for container to become healthy
        TIMEOUT=120 # timeout in seconds (increased to account for start-period)
        START_TIME=$(date +%s)
        END_TIME=$((START_TIME + TIMEOUT))

        # Start with 1 second delay, then exponentially increase
        DELAY=1
        MAX_DELAY=16 # Cap maximum delay at 16 seconds

        while [ $(date +%s) -lt $END_TIME ]; do
          # `|| echo "unknown"` covers the window before the container
          # reports any health state (or if it has no healthcheck yet).
          HEALTH_STATUS=$(docker inspect --format='{{.State.Health.Status}}' ${{ inputs.container-name }} 2>/dev/null || echo "unknown")

          if [ "$HEALTH_STATUS" = "healthy" ]; then
            echo "Container is running!"
            break
          elif [ "$HEALTH_STATUS" = "unhealthy" ]; then
            echo "Container is unhealthy - exiting"
            docker logs ${{ inputs.container-name }}
            exit 1
          fi

          ELAPSED=$(($(date +%s) - START_TIME))
          echo "Waiting for container health (${ELAPSED}s elapsed, retrying in ${DELAY}s)..."

          sleep $DELAY
          DELAY=$((DELAY * 2))
          if [ $DELAY -gt $MAX_DELAY ]; then
            DELAY=$MAX_DELAY
          fi
        done

        # Final check after waiting for the maximum timeout
        # Print logs and the health status
        if [ $(date +%s) -ge $END_TIME ]; then
          echo "Container health check timed out after ${TIMEOUT} seconds"
          echo "Final health status: $(docker inspect --format='{{.State.Health.Status}}' ${{ inputs.container-name }})"
          docker logs ${{ inputs.container-name }}
          exit 1
        fi

.github/workflows/test.yml

Lines changed: 62 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -73,6 +73,50 @@ jobs:
7373
restore-keys: |
7474
pip-${{ matrix.os }}-${{ matrix.python-version }}-v1-
7575
76+
- name: Run the built image
  id: run
  uses: ./.github/actions/docker-daemon-run
  with:
    docker-image: lmstudio/llmster-preview:cpu
    # Must match the `docker exec llmster` / `docker stop llmster` calls in
    # later steps; the action's default name is "llmster-test", which would
    # leave those commands targeting a non-existent container.
    container-name: llmster
    # Use the same port as the always on API server (host 41343 -> container 1234)
    port: "41343:1234"
83+
84+
- name: Download models for tests (Ubuntu only)
  if: matrix.os == 'ubuntu-22.04'
  run: |
    echo "Downloading required models..."

    # Download text LLMs
    docker exec llmster lms get https://huggingface.co/hugging-quants/Llama-3.2-1B-Instruct-Q4_K_M-GGUF -y
    docker exec llmster lms get https://huggingface.co/Qwen/Qwen2.5-7B-Instruct-GGUF -y

    # Download vision LLM
    # (previously also fetched by full URL under "text LLMs" — same HF repo,
    # so the duplicate download has been removed)
    docker exec llmster lms get ZiangWu/MobileVLM_V2-1.7B-GGUF -y

    # Download additional model for speculative decoding examples
    docker exec llmster lms get qwen2.5-0.5b-instruct -y

    echo "Model downloads complete"
101+
102+
- name: Load models into LM Studio (Ubuntu only)
  if: matrix.os == 'ubuntu-22.04'
  run: |
    echo "Loading models..."

    # Load embedding model
    # NOTE(review): nomic-embed-text-v1.5 and smollm2-135m-instruct are not
    # fetched by the download step — confirm they ship inside the image.
    docker exec llmster lms load nomic-embed-text-v1.5 --identifier text-embedding-nomic-embed-text-v1.5 -y

    # Load text LLMs
    docker exec llmster lms load llama-3.2-1b-instruct --identifier llama-3.2-1b-instruct -y
    docker exec llmster lms load qwen2.5-7b-instruct --identifier qwen2.5-7b-instruct-1m -y
    docker exec llmster lms load smollm2-135m-instruct --identifier smollm2-135m-instruct -y

    # Load vision LLM (-y added: without it this load prompts interactively
    # and would hang a non-interactive CI step, unlike the loads above)
    docker exec llmster lms load ZiangWu/MobileVLM_V2-1.7B-GGUF --identifier mobilevlm_v2-1.7b -y

    echo "Model loading complete"
119+
76120
- name: Install PDM
77121
run: |
78122
# Ensure `pdm` uses the same version as specified in `pdm.lock`
@@ -92,11 +136,18 @@ jobs:
92136
source "$VIRTUAL_ENV_BIN_DIR/activate"
93137
python -m tox -v -m static
94138
95-
- name: CI-compatible tests
139+
- name: CI-compatible tests (Windows)
140+
if: matrix.os == 'windows-2022'
96141
run: |
97142
source "$VIRTUAL_ENV_BIN_DIR/activate"
98143
python -m tox -v -- -m 'not lmstudio'
99144
145+
- name: All tests including LM Studio (Ubuntu)
146+
if: matrix.os == 'ubuntu-22.04'
147+
run: |
148+
source "$VIRTUAL_ENV_BIN_DIR/activate"
149+
python -m tox -v
150+
100151
- name: Upload coverage data
101152
uses: actions/upload-artifact@v4
102153
with:
@@ -105,6 +156,12 @@ jobs:
105156
include-hidden-files: true
106157
if-no-files-found: ignore
107158

159+
- name: Stop LM Studio Docker container (Ubuntu only)
160+
if: matrix.os == 'ubuntu-22.04' && always()
161+
run: |
162+
docker stop llmster || true
163+
docker rm llmster || true
164+
108165
# Coverage check based on https://hynek.me/articles/ditch-codecov-python/
109166
coverage:
110167
name: Combine & check coverage
@@ -143,12 +200,10 @@ jobs:
143200
# Report again and fail if under 50%.
144201
# Highest historical coverage: 65%
145202
# Last noted local test coverage level: 94%
146-
# CI coverage percentage is low because many of the tests
147-
# aren't CI compatible (they need a local LM Studio instance).
148-
# It's only as high as it is because the generated data model
149-
# classes make up such a large portion of the total SDK code.
150-
# Accept anything over 50% until CI is set up to run LM Studio
151-
# in headless mode, and hence is able to run end-to-end tests.
203+
# Ubuntu runners now run LM Studio in Docker, so they achieve higher
204+
# coverage than Windows runners (which skip LM Studio tests).
205+
# The generated data model classes make up a large portion of the SDK code.
206+
# Accept anything over 50% as Windows runners still skip LM Studio tests.
152207
coverage report --fail-under=50
153208
154209
- name: Upload HTML report if check failed

0 commit comments

Comments
 (0)