
Commit 055aa9e

update test
1 parent d47360e commit 055aa9e

File tree

1 file changed: +8 -8 lines changed


examples/server/tests/unit/test_infill.py

Lines changed: 8 additions & 8 deletions
@@ -13,28 +13,28 @@ def test_infill_without_input_extra():
     global server
     server.start()
     res = server.make_request("POST", "/infill", data={
-        "prompt": "Complete this",
-        "input_prefix": "#include <cstdio>\n#include \"llama.h\"\n\nint main() {\n    int n_threads = llama_",
+        "input_prefix": "#include <cstdio>\n#include \"llama.h\"\n\nint main() {\n",
+        "prompt": "    int n_threads = llama_",
         "input_suffix": "}\n",
     })
     assert res.status_code == 200
-    assert match_regex("(One|day|she|saw|big|scary|bird)+", res.body["content"])
+    assert match_regex("(Ann|small|shiny)+", res.body["content"])
 
 
 def test_infill_with_input_extra():
     global server
     server.start()
     res = server.make_request("POST", "/infill", data={
-        "prompt": "Complete this",
         "input_extra": [{
             "filename": "llama.h",
             "text": "LLAMA_API int32_t llama_n_threads();\n"
         }],
-        "input_prefix": "#include <cstdio>\n#include \"llama.h\"\n\nint main() {\n    int n_threads = llama_",
+        "input_prefix": "#include <cstdio>\n#include \"llama.h\"\n\nint main() {\n",
+        "prompt": "    int n_threads = llama_",
         "input_suffix": "}\n",
     })
     assert res.status_code == 200
-    assert match_regex("(help|find|band)+", res.body["content"])
+    assert match_regex("(Dad|excited|park)+", res.body["content"])
 
 
 @pytest.mark.parametrize("input_extra", [
@@ -65,12 +65,12 @@ def test_with_qwen_model():
     server.model_hf_file = "qwen2.5-coder-1.5b-iq3_xxs-imat.gguf"
     server.start(timeout_seconds=600)
     res = server.make_request("POST", "/infill", data={
-        # "prompt": "Complete this", # FIXME: add more complicated prompt when format_infill is fixed
         "input_extra": [{
             "filename": "llama.h",
             "text": "LLAMA_API int32_t llama_n_threads();\n"
         }],
-        "input_prefix": "#include <cstdio>\n#include \"llama.h\"\n\nint main() {\n    int n_threads = llama_",
+        "input_prefix": "#include <cstdio>\n#include \"llama.h\"\n\nint main() {\n",
+        "prompt": "    int n_threads = llama_",
         "input_suffix": "}\n",
     })
     assert res.status_code == 200
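
The change splits the old input_prefix so that the code before the cursor line stays in input_prefix while the in-progress line ("    int n_threads = llama_") now travels in prompt. Below is a minimal sketch of how these three request fields could be stitched into a fill-in-the-middle prompt; it assumes Qwen2.5-Coder-style FIM special tokens and a hypothetical build_fim_prompt helper, not the server's actual format_infill logic.

# Minimal sketch (hypothetical helper, not the server's format_infill):
# shows how "input_prefix", "prompt", and "input_suffix" from an /infill
# request could be assembled into a FIM prompt, assuming Qwen2.5-Coder
# FIM special tokens.

FIM_PRE = "<|fim_prefix|>"
FIM_SUF = "<|fim_suffix|>"
FIM_MID = "<|fim_middle|>"

def build_fim_prompt(input_prefix: str, prompt: str, input_suffix: str) -> str:
    # The cursor-line text ("prompt") is appended directly after the prefix,
    # so the model is asked to continue right after "llama_".
    return f"{FIM_PRE}{input_prefix}{prompt}{FIM_SUF}{input_suffix}{FIM_MID}"

if __name__ == "__main__":
    print(build_fim_prompt(
        input_prefix="#include <cstdio>\n#include \"llama.h\"\n\nint main() {\n",
        prompt="    int n_threads = llama_",
        input_suffix="}\n",
    ))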
