Skip to content

Commit f8e39fd

Browse files
committed
更新lmdeploy版本
1 parent 7674e90 commit f8e39fd

File tree

12 files changed

+383
-120
lines changed

12 files changed

+383
-120
lines changed

gpt_server/model_backend/lmdeploy_backend.py

Lines changed: 9 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99
from typing import Any, Dict, AsyncGenerator, List, Optional
1010
from lmdeploy.archs import get_task
1111
from gpt_server.model_handler.reasoning_parser import ReasoningParserManager
12-
from lmdeploy.serve.async_engine import get_names_from_model
12+
from lmdeploy.serve.async_engine import best_match_model
1313
from loguru import logger
1414
from gpt_server.model_backend.base import ModelBackend
1515
from gpt_server.settings import get_model_config
@@ -124,7 +124,7 @@ def __init__(self, model_path, tokenizer: PreTrainedTokenizerBase) -> None:
124124
backend=backend,
125125
backend_config=backend_config,
126126
)
127-
model_name, chat_template_name = get_names_from_model(model_path=model_path)
127+
chat_template_name = best_match_model(query=model_path)
128128
self.chat_template_name = chat_template_name
129129
self.tokenizer = self.async_engine.tokenizer
130130
self.reasoning_parser_cache = {}
@@ -166,18 +166,14 @@ async def stream_chat(self, params: Dict[str, Any]) -> AsyncGenerator:
166166
skip_special_tokens=True,
167167
response_format=params["response_format"],
168168
)
169-
if params.get("tools", None) or is_messages_with_tool(messages=messages):
170-
messages = prompt or messages # 解决lmdeploy 的提示模板不支持 tools
169+
# if params.get("tools", None) or is_messages_with_tool(messages=messages):
170+
# messages = prompt or messages # 解决lmdeploy 的提示模板不支持 tools
171171
logger.info(f"chat_template_name: {self.chat_template_name}")
172-
if self.chat_template_name == "base":
173-
messages = prompt or messages
174-
multimodal = params.get("multimodal", False)
175-
if multimodal: # 多模态模型
176-
messages = params["messages"]
177-
if isinstance(messages, str):
178-
logger.info(f"使用prompt模式")
179-
else:
180-
logger.info(f"使用messages模式")
172+
# if self.chat_template_name == "base":
173+
# messages = prompt or messages
174+
# multimodal = params.get("multimodal", False)
175+
# if multimodal: # 多模态模型
176+
# messages = params["messages"]
181177
results_generator = self.async_engine.generate(
182178
messages=messages,
183179
session_id=int(request_id),

gpt_server/model_backend/vllm_backend.py

Lines changed: 13 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -61,7 +61,6 @@ def __init__(self, model_path, tokenizer: AutoTokenizer) -> None:
6161
async def stream_chat(self, params: Dict[str, Any]) -> AsyncGenerator:
6262
prompt = params.get("prompt", "")
6363
messages = params["messages"]
64-
logger.info(f"prompt:\n{prompt}")
6564
request_id = params.get("request_id", "0")
6665
temperature = float(params.get("temperature", 0.8))
6766
top_p = float(params.get("top_p", 0.8))
@@ -74,6 +73,7 @@ async def stream_chat(self, params: Dict[str, Any]) -> AsyncGenerator:
7473
repetition_penalty = float(params.get("repetition_penalty", 1.0))
7574
enable_thinking = bool(params.get("enable_thinking", True))
7675
request = params.get("request", None)
76+
tools = params.get("tools", None)
7777
# Handle stop_str
7878
stop = set()
7979
if isinstance(stop_str, str) and stop_str != "":
@@ -89,7 +89,7 @@ async def stream_chat(self, params: Dict[str, Any]) -> AsyncGenerator:
8989
conversation, mm_data_future = parse_chat_messages_futures(
9090
messages, model_config, tokenizer, content_format="string"
9191
)
92-
tools = params.get("tools", None)
92+
9393
prompt = apply_hf_chat_template(
9494
tokenizer,
9595
conversation=conversation,
@@ -102,11 +102,22 @@ async def stream_chat(self, params: Dict[str, Any]) -> AsyncGenerator:
102102
mm_data = await mm_data_future
103103
inputs = {"multi_modal_data": mm_data, "prompt": prompt}
104104
else:
105+
conversation = messages
106+
prompt = apply_hf_chat_template(
107+
tokenizer,
108+
conversation=conversation,
109+
chat_template=tokenizer.get_chat_template(),
110+
add_generation_prompt=True,
111+
tools=tools,
112+
model_config=await self.engine.get_model_config(),
113+
enable_thinking=enable_thinking,
114+
)
105115
input_ids = params.get("input_ids", None)
106116
inputs = {"prompt": prompt}
107117
if input_ids is not None:
108118
prompt_token_ids = input_ids.tolist()[0]
109119
inputs["prompt_token_ids"] = prompt_token_ids
120+
logger.info(f"prompt:\n{prompt}")
110121
# ----------------------------------------------------------------
111122
# make sampling params in vllm
112123
top_p = max(top_p, 1e-5)
Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
from pathlib import Path
from typing import Literal

# Directory containing this module; the .jinja template files live alongside it.
cur_path = Path(__file__).parent

# Model names that all share the qwen3 chat-template family.
_QWEN_MODELS = ("qwen3", "qwen2_5", "qwen")


def get_chat_template(model_name: str = "", lang: Literal["en", "zh"] = "en") -> str:
    """Return the Jinja chat template text for *model_name*.

    Parameters
    ----------
    model_name : str
        Model name, e.g. "qwen3", "qwen2_5" or "qwen".
    lang : {"en", "zh"}, optional
        Language variant of the template, by default "en".

    Returns
    -------
    str
        The chat template (Jinja source) read from disk.

    Raises
    ------
    ValueError
        If no template is registered for *model_name*. (The previous
        implementation silently fell through and returned ``None``,
        violating the declared ``-> str`` return type and deferring the
        failure to wherever the value was later used as a string.)
    """
    suffix = "_zh" if lang == "zh" else ""
    if model_name in _QWEN_MODELS:
        # All qwen-family models share the qwen3 template file.
        return (cur_path / f"qwen3{suffix}.jinja").read_text(encoding="utf8")
    raise ValueError(f"No chat template registered for model: {model_name!r}")


if __name__ == "__main__":
    chat_template = get_chat_template("qwen3", lang="zh")
    print(chat_template)
Lines changed: 88 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,88 @@
1+
{%- if tools %}
2+
{{- '<|im_start|>system\n' }}
3+
{%- if messages[0].role == 'system' %}
4+
{{- messages[0].content + '\n\n' }}
5+
{%- else %}
6+
{{- 'You are a helpful assistant. \n\n' }}
7+
{%- endif %}
8+
{{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
9+
{%- for tool in tools %}
10+
{{- "\n" }}
11+
{{- tool | tojson }}
12+
{%- endfor %}
13+
{{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
14+
{%- else %}
15+
{%- if messages[0].role == 'system' %}
16+
{{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
17+
{%- endif %}
18+
{%- endif %}
19+
{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
20+
{%- for message in messages[::-1] %}
21+
{%- set index = (messages|length - 1) - loop.index0 %}
22+
{%- if ns.multi_step_tool and message.role == "user" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
23+
{%- set ns.multi_step_tool = false %}
24+
{%- set ns.last_query_index = index %}
25+
{%- endif %}
26+
{%- endfor %}
27+
{%- for message in messages %}
28+
{%- if message.content is string %}
29+
{%- set content = message.content %}
30+
{%- else %}
31+
{%- set content = '' %}
32+
{%- endif %}
33+
{%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
34+
{{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
35+
{%- elif message.role == "assistant" %}
36+
{%- set reasoning_content = '' %}
37+
{%- if message.reasoning_content is string %}
38+
{%- set reasoning_content = message.reasoning_content %}
39+
{%- else %}
40+
{%- if '</think>' in content %}
41+
{%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
42+
{%- set content = content.split('</think>')[-1].lstrip('\n') %}
43+
{%- endif %}
44+
{%- endif %}
45+
{%- if loop.index0 > ns.last_query_index %}
46+
{%- if loop.last or (not loop.last and reasoning_content) %}
47+
{{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
48+
{%- else %}
49+
{{- '<|im_start|>' + message.role + '\n' + content }}
50+
{%- endif %}
51+
{%- else %}
52+
{{- '<|im_start|>' + message.role + '\n' + content }}
53+
{%- endif %}
54+
{%- if message.tool_calls %}
55+
{%- for tool_call in message.tool_calls %}
56+
{%- if (loop.first and content) or (not loop.first) %}
57+
{{- '\n' }}
58+
{%- endif %}
59+
{%- if tool_call.function %}
60+
{%- set tool_call = tool_call.function %}
61+
{%- endif %}
62+
{{- '<tool_call>\n{"name": "' }}
63+
{{- tool_call.name }}
64+
{{- '", "arguments": ' }}
65+
{%- if tool_call.arguments is string %}
66+
{{- tool_call.arguments }}
67+
{%- else %}
68+
{{- tool_call.arguments | tojson }}
69+
{%- endif %}
70+
{{- '}\n</tool_call>' }}
71+
{%- endfor %}
72+
{%- endif %}
73+
{{- '<|im_end|>\n' }}
74+
{%- elif message.role == "tool" %}
75+
{%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
76+
{{- '<|im_start|>user' }}
77+
{%- endif %}
78+
{{- '\n<tool_response>\n' }}
79+
{{- content }}
80+
{{- '\n</tool_response>' }}
81+
{%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
82+
{{- '<|im_end|>\n' }}
83+
{%- endif %}
84+
{%- endif %}
85+
{%- endfor %}
86+
{%- if add_generation_prompt %}
87+
{{- '<|im_start|>assistant\n' }}
88+
{%- endif %}
Lines changed: 88 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,88 @@
1+
{%- if tools %}
2+
{{- '<|im_start|>system\n' }}
3+
{%- if messages[0].role == 'system' %}
4+
{{- messages[0].content + '\n\n' }}
5+
{%- else %}
6+
{{- 'You are a helpful assistant. \n\n' }}
7+
{%- endif %}
8+
{{- "# Tools\n\n您可以调用一个或多个function来协助处理用户查询。\n\n在<tools></tools> XML标签中提供了function的签名(即函数的结构信息):\n<tools>" }}
9+
{%- for tool in tools %}
10+
{{- "\n" }}
11+
{{- tool | tojson }}
12+
{%- endfor %}
13+
{{- "\n</tools>\n\n对于单个function的调用, 返回一个包含function name和参数的 JSON 对象,并用 <tool_call></tool_call> XML 标签包裹,形如:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
14+
{%- else %}
15+
{%- if messages[0].role == 'system' %}
16+
{{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
17+
{%- endif %}
18+
{%- endif %}
19+
{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
20+
{%- for message in messages[::-1] %}
21+
{%- set index = (messages|length - 1) - loop.index0 %}
22+
{%- if ns.multi_step_tool and message.role == "user" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
23+
{%- set ns.multi_step_tool = false %}
24+
{%- set ns.last_query_index = index %}
25+
{%- endif %}
26+
{%- endfor %}
27+
{%- for message in messages %}
28+
{%- if message.content is string %}
29+
{%- set content = message.content %}
30+
{%- else %}
31+
{%- set content = '' %}
32+
{%- endif %}
33+
{%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
34+
{{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
35+
{%- elif message.role == "assistant" %}
36+
{%- set reasoning_content = '' %}
37+
{%- if message.reasoning_content is string %}
38+
{%- set reasoning_content = message.reasoning_content %}
39+
{%- else %}
40+
{%- if '</think>' in content %}
41+
{%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
42+
{%- set content = content.split('</think>')[-1].lstrip('\n') %}
43+
{%- endif %}
44+
{%- endif %}
45+
{%- if loop.index0 > ns.last_query_index %}
46+
{%- if loop.last or (not loop.last and reasoning_content) %}
47+
{{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
48+
{%- else %}
49+
{{- '<|im_start|>' + message.role + '\n' + content }}
50+
{%- endif %}
51+
{%- else %}
52+
{{- '<|im_start|>' + message.role + '\n' + content }}
53+
{%- endif %}
54+
{%- if message.tool_calls %}
55+
{%- for tool_call in message.tool_calls %}
56+
{%- if (loop.first and content) or (not loop.first) %}
57+
{{- '\n' }}
58+
{%- endif %}
59+
{%- if tool_call.function %}
60+
{%- set tool_call = tool_call.function %}
61+
{%- endif %}
62+
{{- '<tool_call>\n{"name": "' }}
63+
{{- tool_call.name }}
64+
{{- '", "arguments": ' }}
65+
{%- if tool_call.arguments is string %}
66+
{{- tool_call.arguments }}
67+
{%- else %}
68+
{{- tool_call.arguments | tojson }}
69+
{%- endif %}
70+
{{- '}\n</tool_call>' }}
71+
{%- endfor %}
72+
{%- endif %}
73+
{{- '<|im_end|>\n' }}
74+
{%- elif message.role == "tool" %}
75+
{%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
76+
{{- '<|im_start|>user' }}
77+
{%- endif %}
78+
{{- '\n<tool_response>\n' }}
79+
{{- content }}
80+
{{- '\n</tool_response>' }}
81+
{%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
82+
{{- '<|im_end|>\n' }}
83+
{%- endif %}
84+
{%- endif %}
85+
{%- endfor %}
86+
{%- if add_generation_prompt %}
87+
{{- '<|im_start|>assistant\n' }}
88+
{%- endif %}

gpt_server/model_handler/prompts.py

Lines changed: 34 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,15 @@
11
from typing import Optional
2-
from lmdeploy.model import MODELS, Qwen7BChat, ChatGLM3, get_text
2+
from lmdeploy.model import (
3+
MODELS,
4+
get_text,
5+
HFChatTemplate,
6+
BaseChatTemplate,
7+
)
38
import json
49

510

611
@MODELS.register_module(name="glm4", force=True)
7-
class Glm4Chat(ChatGLM3):
12+
class Glm4Chat(BaseChatTemplate):
813
"""Chat template of glm-4 model."""
914

1015
def __init__(
@@ -125,7 +130,7 @@ def messages2prompt_base(self, messages, sequence_start=True, tools=None, **kwar
125130

126131

127132
@MODELS.register_module(name="qwen2_5")
128-
class Qwen2d5Chat(Qwen7BChat):
133+
class Qwen2d5Chat(BaseChatTemplate):
129134
"""Chat template for Qwen2.5-Instruct series."""
130135

131136
def __init__(
@@ -253,15 +258,15 @@ def match(cls, model_path: str) -> Optional[str]:
253258
if __name__ == "__main__":
254259
chat_template = MODELS.module_dict["qwen2_5"]()
255260
messages = [
256-
{"role": "system", "content": "我的Qwen "},
261+
# {"role": "system", "content": "我的Qwen "},
257262
{"role": "user", "content": "你是谁 "},
258263
]
259264
tools = [
260265
{
261266
"type": "function",
262267
"function": {
263268
"name": "get_weather",
264-
"description": "Get the current weather in a given location",
269+
"description": "在给定位置获得当前的天气",
265270
"parameters": {
266271
"type": "object",
267272
"properties": {
@@ -296,4 +301,28 @@ def match(cls, model_path: str) -> Optional[str]:
296301
]
297302
# tools = None
298303
prompt = chat_template.messages2prompt(messages, True, tools)
304+
# print(prompt)
305+
#
306+
with open(
307+
"/home/dev/liuyu/project/gpt_server/gpt_server/model_handler/chat_template/qwen3_zh.jinja",
308+
"r",
309+
encoding="utf8",
310+
) as f:
311+
qwen3_next = f.read()
312+
313+
# qwen2_5xxx = "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' 
}}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n"
314+
# print(qwen3_next)
315+
# assert 0
316+
from transformers import AutoTokenizer
317+
318+
model_path = "/home/dev/model/Qwen/Qwen2___5-32B-Instruct-AWQ/"
319+
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
320+
# chat_template
321+
prompt = tokenizer.apply_chat_template(
322+
messages,
323+
tokenize=False,
324+
add_generation_prompt=True,
325+
chat_template=qwen3_next,
326+
tools=tools,
327+
)
299328
print(prompt)

0 commit comments

Comments
 (0)