|
| 1 | +import pytest |
| 2 | +from openai import OpenAI |
| 3 | +from utils import * |
| 4 | + |
# Module-level server handle; replaced with a fresh preset by the autouse fixture.
server = ServerPreset.tinyllama2()
| 6 | + |
| 7 | + |
@pytest.fixture(scope="module", autouse=True)
def create_server():
    """Recreate the shared tinyllama2 server preset once per test module."""
    global server
    server = ServerPreset.tinyllama2()
| 12 | + |
| 13 | + |
@pytest.mark.parametrize(
    "model,system_prompt,user_prompt,max_tokens,re_content,n_prompt,n_predicted,truncated",
    [
        ("llama-2", "Book", "What is the best book", 8, "(Suddenly)+", 77, 8, False),
        ("codellama70b", "You are a coding assistant.", "Write the fibonacci function in c++.", 128, "(Aside|she|felter|alonger)+", 104, 64, False),
    ]
)
def test_chat_completion(model, system_prompt, user_prompt, max_tokens, re_content, n_prompt, n_predicted, truncated):
    """Non-streaming /chat/completions: verify token counts, role, content and finish_reason."""
    global server
    server.start()
    res = server.make_request("POST", "/chat/completions", data={
        "model": model,
        "max_tokens": max_tokens,
        "messages": [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ],
    })
    assert res.status_code == 200
    usage = res.body["usage"]
    assert usage["prompt_tokens"] == n_prompt
    assert usage["completion_tokens"] == n_predicted
    choice = res.body["choices"][0]
    assert choice["message"]["role"] == "assistant"
    assert match_regex(re_content, choice["message"]["content"])
    # A truncated generation must be reported as "length"; a natural end as "stop".
    assert choice["finish_reason"] == ("length" if truncated else "stop")
| 42 | + |
| 43 | + |
@pytest.mark.parametrize(
    "model,system_prompt,user_prompt,max_tokens,re_content,n_prompt,n_predicted,truncated",
    [
        ("llama-2", "Book", "What is the best book", 8, "(Suddenly)+", 77, 8, False),
        ("codellama70b", "You are a coding assistant.", "Write the fibonacci function in c++.", 128, "(Aside|she|felter|alonger)+", 104, 64, False),
    ]
)
def test_chat_completion_stream(model, system_prompt, user_prompt, max_tokens, re_content, n_prompt, n_predicted, truncated):
    """Streaming /chat/completions: accumulate deltas, then validate the final chunk."""
    global server
    server.start()
    res = server.make_stream_request("POST", "/chat/completions", data={
        "model": model,
        "max_tokens": max_tokens,
        "messages": [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ],
        "stream": True,
    })
    content = ""
    for data in res:
        choice = data["choices"][0]
        if choice["finish_reason"] is None:
            # Intermediate chunk: keep collecting the generated text.
            content += choice["delta"]["content"]
            continue
        # Final chunk: usage totals are reported and the delta carries no content.
        assert choice["finish_reason"] in ["stop", "length"]
        assert data["usage"]["prompt_tokens"] == n_prompt
        assert data["usage"]["completion_tokens"] == n_predicted
        assert "content" not in choice["delta"]
        assert match_regex(re_content, content)
        # FIXME: not sure why this is incorrect in stream mode
        # if truncated:
        #     assert choice["finish_reason"] == "length"
        # else:
        #     assert choice["finish_reason"] == "stop"
| 79 | + |
| 80 | + |
def test_chat_completion_with_openai_library():
    """Exercise /chat/completions through the official OpenAI client.

    Verifies that the server's OpenAI-compatible endpoint works end-to-end:
    the request completes with finish_reason "stop" and the generated
    content matches the expected pattern for the fixed seed/temperature.
    """
    global server
    server.start()
    client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}")
    res = client.chat.completions.create(
        model="gpt-3.5-turbo-instruct",  # model name is ignored by the server; any value works
        messages=[
            {"role": "system", "content": "Book"},
            {"role": "user", "content": "What is the best book"},
        ],
        max_tokens=8,
        seed=42,
        temperature=0.8,
    )
    # NOTE(review): removed a leftover debug `print(res)` — pytest captures
    # stdout anyway, and failures already show the mismatched values.
    assert res.choices[0].finish_reason == "stop"
    assert res.choices[0].message.content is not None
    assert match_regex("(Suddenly)+", res.choices[0].message.content)
| 99 | + |
| 100 | + |
@pytest.mark.parametrize("response_format,n_predicted,re_content", [
    ({"type": "json_object", "schema": {"const": "42"}}, 6, "\"42\""),
    ({"type": "json_object", "schema": {"items": [{"type": "integer"}]}}, 10, "[ -3000 ]"),
    ({"type": "json_object"}, 10, "(\\{|John)+"),
    ({"type": "sound"}, 0, None),
    # invalid response format (expected to fail)
    ({"type": "json_object", "schema": 123}, 0, None),
    ({"type": "json_object", "schema": {"type": 123}}, 0, None),
    ({"type": "json_object", "schema": {"type": "hiccup"}}, 0, None),
])
def test_completion_with_response_format(response_format: dict, n_predicted: int, re_content: str | None):
    """Valid response_format values yield matching content; invalid ones an error body.

    A case with re_content=None is expected to be rejected by the server.
    """
    global server
    server.start()
    res = server.make_request("POST", "/chat/completions", data={
        "max_tokens": n_predicted,
        "messages": [
            {"role": "system", "content": "You are a coding assistant."},
            {"role": "user", "content": "Write an example"},
        ],
        "response_format": response_format,
    })
    if re_content is None:
        # Malformed/unsupported format: server must refuse with an error payload.
        assert res.status_code != 200
        assert "error" in res.body
    else:
        assert res.status_code == 200
        assert match_regex(re_content, res.body["choices"][0]["message"]["content"])
| 129 | + |
0 commit comments