Skip to content

Commit 8aaf69a

Browse files
committed
add speculative decoding server test
1 parent ac404be commit 8aaf69a

File tree

2 files changed

+114
-1
lines changed

2 files changed

+114
-1
lines changed
Lines changed: 103 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,103 @@
1+
import pytest
2+
from utils import *
3+
4+
# We use a F16 MOE gguf as main model, and q4_0 as draft model

# Shared server instance for all tests in this module; (re)built by create_server().
server = ServerPreset.stories15m_moe()

# q4_0 quantized draft model used for speculative decoding.
MODEL_DRAFT_FILE_URL = "https://huggingface.co/ggml-org/models/resolve/main/tinyllamas/stories15M-q4_0.gguf"
9+
10+
def create_server():
    """(Re)create the module-level server preset with a draft model attached.

    Downloads the q4_0 draft model next to the repo root on first use, then
    configures the server to use it with default draft token bounds.
    """
    global server
    server = ServerPreset.stories15m_moe()
    # download draft model file if needed
    file_name = MODEL_DRAFT_FILE_URL.split('/')[-1]
    model_draft_file = f'../../../{file_name}'
    if not os.path.exists(model_draft_file):
        print(f"Downloading {MODEL_DRAFT_FILE_URL} to {model_draft_file}")
        # Stream to a temp file and rename at the end: an interrupted download
        # must not leave a truncated model behind, because the os.path.exists
        # guard above would then skip re-downloading it forever.
        tmp_file = model_draft_file + '.downloading'
        response = requests.get(MODEL_DRAFT_FILE_URL, stream=True)
        # fail loudly on HTTP errors instead of saving an error page as a model
        response.raise_for_status()
        with open(tmp_file, 'wb') as f:
            # stream in 1 MiB chunks rather than buffering the whole file in memory
            for chunk in response.iter_content(chunk_size=1 << 20):
                f.write(chunk)
        os.rename(tmp_file, model_draft_file)
        print("Done downloading draft model file")
    # set default values
    server.model_draft = model_draft_file
    server.draft_min = 4
    server.draft_max = 8
25+
26+
27+
@pytest.fixture(scope="module", autouse=True)
def fixture_create_server():
    """Autouse module fixture: build a fresh draft-enabled server before the tests."""
    create_server()
30+
31+
32+
def test_with_and_without_draft():
    """Greedy decoding must produce identical text with and without a draft model."""
    global server

    def greedy_completion():
        # fresh payload per call; temperature 0 + top_k 1 makes output deterministic
        return server.make_request("POST", "/completion", data={
            "prompt": "I believe the meaning of life is",
            "temperature": 0.0,
            "top_k": 1,
        })

    # baseline run with the draft model disabled
    server.model_draft = None
    server.start()
    baseline = greedy_completion()
    assert baseline.status_code == 200
    content_no_draft = baseline.body["content"]
    server.stop()

    # fresh server with the draft model enabled
    create_server()
    server.start()
    speculative = greedy_completion()
    assert speculative.status_code == 200
    content_draft = speculative.body["content"]

    assert content_no_draft == content_draft
57+
58+
59+
def test_different_draft_min_draft_max():
    """Greedy output must not depend on the draft-min/draft-max settings."""
    global server
    previous = None
    for draft_min, draft_max in ((1, 2), (1, 4), (4, 8), (4, 12), (8, 16)):
        # restart the server with the next draft window
        server.stop()
        server.draft_min = draft_min
        server.draft_max = draft_max
        server.start()
        res = server.make_request("POST", "/completion", data={
            "prompt": "I believe the meaning of life is",
            "temperature": 0.0,
            "top_k": 1,
        })
        assert res.status_code == 200
        # every configuration must reproduce the previous one's output
        if previous is not None:
            assert previous == res.body["content"]
        previous = res.body["content"]
83+
84+
85+
@pytest.mark.parametrize("n_slots,n_requests", [
    (1, 2),
    (2, 2),
])
def test_multi_requests_parallel(n_slots: int, n_requests: int):
    """Concurrent greedy completions should all succeed and contain story words."""
    global server
    server.n_slots = n_slots
    server.start()
    # one task per request; each task gets its own payload dict
    tasks = [
        (server.make_request, ("POST", "/completion", {
            "prompt": "I believe the meaning of life is",
            "temperature": 0.0,
            "top_k": 1,
        }))
        for _ in range(n_requests)
    ]
    for res in parallel_function_calls(tasks):
        assert res.status_code == 200
        assert match_regex("(wise|kind|owl|answer)+", res.body["content"])

examples/server/tests/utils.py

Lines changed: 11 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,7 @@ class ServerProcess:
4747
model_alias: str | None = None
4848
model_url: str | None = None
4949
model_file: str | None = None
50+
model_draft: str | None = None
5051
n_threads: int | None = None
5152
n_gpu_layer: int | None = None
5253
n_batch: int | None = None
@@ -69,6 +70,8 @@ class ServerProcess:
6970
response_format: str | None = None
7071
lora_files: List[str] | None = None
7172
disable_ctx_shift: int | None = False
73+
draft_min: int | None = None
74+
draft_max: int | None = None
7275

7376
# session variables
7477
process: subprocess.Popen | None = None
@@ -103,6 +106,8 @@ def start(self, timeout_seconds: int = 10) -> None:
103106
server_args.extend(["--model", self.model_file])
104107
if self.model_url:
105108
server_args.extend(["--model-url", self.model_url])
109+
if self.model_draft:
110+
server_args.extend(["--model-draft", self.model_draft])
106111
if self.model_hf_repo:
107112
server_args.extend(["--hf-repo", self.model_hf_repo])
108113
if self.model_hf_file:
@@ -148,6 +153,10 @@ def start(self, timeout_seconds: int = 10) -> None:
148153
server_args.extend(["--no-context-shift"])
149154
if self.api_key:
150155
server_args.extend(["--api-key", self.api_key])
156+
if self.draft_max:
157+
server_args.extend(["--draft-max", self.draft_max])
158+
if self.draft_min:
159+
server_args.extend(["--draft-min", self.draft_min])
151160

152161
args = [str(arg) for arg in [server_path, *server_args]]
153162
print(f"bench: starting server with: {' '.join(args)}")
@@ -200,7 +209,8 @@ def server_log(in_stream, out_stream):
200209
raise TimeoutError(f"Server did not start within {timeout_seconds} seconds")
201210

202211
def stop(self) -> None:
203-
server_instances.remove(self)
212+
if self in server_instances:
213+
server_instances.remove(self)
204214
if self.process:
205215
print(f"Stopping server with pid={self.process.pid}")
206216
self.process.kill()

0 commit comments

Comments
 (0)