
Commit e31dd4a

add download_model file
1 parent 8115fa9 commit e31dd4a

11 files changed: +55 −10 lines

gpt_server/model_worker/baichuan.py
Lines changed: 1 addition & 1 deletion

@@ -84,7 +84,7 @@ def __init__(
         self.stop = [
             self.tokenizer.decode(skip_word) for skip_word in self.stop_words_ids
         ]
-        logger.info(f"BaiChuan 停用词: {self.stop}")
+        logger.info(f"{model_names[0]} 停用词: {self.stop}")

     async def generate_stream_gate(self, params):
         self.call_ct += 1

gpt_server/model_worker/chatglm.py
Lines changed: 1 addition & 1 deletion

@@ -39,7 +39,7 @@ def __init__(
                 self.stop_words_ids.append(self.tokenizer.convert_tokens_to_ids(i))
         except Exception as e:
             pass
-        logger.info(f"chatglm停用词: {self.stop}")
+        logger.info(f"{model_names[0]} 停用词: {self.stop}")

     def build_chat_input(self, query, history=None, role="user"):
         if history is None:

gpt_server/model_worker/deepseek.py
Lines changed: 1 addition & 1 deletion

@@ -36,7 +36,7 @@ def __init__(
         self.stop = [
             self.tokenizer.decode(skip_word) for skip_word in self.stop_words_ids
         ]
-        logger.info(f"DeepSeek停用词: {self.stop}")
+        logger.info(f"{model_names[0]} 停用词: {self.stop}")

     async def generate_stream_gate(self, params):
         self.call_ct += 1

gpt_server/model_worker/internlm.py
Lines changed: 1 addition & 1 deletion

@@ -38,7 +38,7 @@ def __init__(
         self.stop = [
             self.tokenizer.decode(skip_word) for skip_word in self.stop_words_ids
         ]
-        logger.info(f"Internlm停用词:: {self.stop}")
+        logger.info(f"{model_names[0]} 停用词: {self.stop}")
         self.other_config = {
             "chat_template": "{{ bos_token }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
         }

gpt_server/model_worker/internvl2.py
Lines changed: 1 addition & 1 deletion

@@ -35,7 +35,7 @@ def __init__(
         self.stop = [
             self.tokenizer.decode(skip_word) for skip_word in self.stop_words_ids
         ]
-        logger.info(f"InternVL2停用词: {self.stop}")
+        logger.info(f"{model_names[0]} 停用词: {self.stop}")

     async def generate_stream_gate(self, params):
         self.call_ct += 1

gpt_server/model_worker/llama.py
Lines changed: 1 addition & 1 deletion

@@ -37,7 +37,7 @@ def __init__(
         self.stop = [
             self.tokenizer.decode(skip_word) for skip_word in self.stop_words_ids
         ]
-        logger.info(f"Llama停用词: {self.stop}")
+        logger.info(f"{model_names[0]} 停用词: {self.stop}")

     async def generate_stream_gate(self, params):
         self.call_ct += 1

gpt_server/model_worker/minicpmv.py
Lines changed: 1 addition & 1 deletion

@@ -35,7 +35,7 @@ def __init__(
         self.stop = [
             self.tokenizer.decode(skip_word) for skip_word in self.stop_words_ids
         ]
-        logger.info(f"MiniCPMV停用词: {self.stop}")
+        logger.info(f"{model_names[0]} 停用词: {self.stop}")

     async def generate_stream_gate(self, params):
         self.call_ct += 1

gpt_server/model_worker/mixtral.py
Lines changed: 1 addition & 1 deletion

@@ -40,7 +40,7 @@ def __init__(
         self.stop = [
             self.tokenizer.decode(skip_word) for skip_word in self.stop_words_ids
         ]
-        logger.info(f"Mixtral停用词: {self.stop}")
+        logger.info(f"{model_names[0]} 停用词: {self.stop}")

     async def generate_stream_gate(self, params):
         self.call_ct += 1

gpt_server/model_worker/qwen.py
Lines changed: 1 addition & 1 deletion

@@ -42,7 +42,7 @@ def __init__(
         ]
         # 拓展额外的stop
         self.stop.extend(["Observation:"])
-        logger.info(f"qwen停用词: {self.stop}")
+        logger.info(f"{model_names[0]} 停用词: {self.stop}")
         self.other_config = {
             "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content']}}{% if (loop.last and add_generation_prompt) or not loop.last %}{{ '<|im_end|>' + '\n'}}{% endif %}{% endfor %}{% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}{{ '<|im_start|>assistant\n' }}{% endif %}"
         }

gpt_server/model_worker/yi.py
Lines changed: 1 addition & 1 deletion

@@ -35,7 +35,7 @@ def __init__(
         self.stop = [
             self.tokenizer.decode(skip_word) for skip_word in self.stop_words_ids
         ]
-        logger.info(f"Yi停用词: {self.stop}")
+        logger.info(f"{model_names[0]} 停用词: {self.stop}")

     async def generate_stream_gate(self, params):
         self.call_ct += 1
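
All ten worker diffs apply the same one-line change: the hard-coded model family name in the stop-word log line ("BaiChuan", "chatglm", "qwen", ...) is replaced with the first entry of `model_names`, so the log reflects whichever model alias the worker was actually launched with. A minimal sketch of the shared pattern follows; the class name, constructor arguments, and the loguru import are illustrative assumptions, not the repository's actual signatures.

from loguru import logger  # assumption: the workers log via a module-level `logger` object


class DemoWorker:
    """Illustrative worker showing the stop-word logging pattern from the diffs above."""

    def __init__(self, tokenizer, model_names, stop_words_ids):
        self.tokenizer = tokenizer
        self.stop_words_ids = stop_words_ids
        # Decode each stop-word id (a token id or id sequence) back to its text form.
        self.stop = [
            self.tokenizer.decode(skip_word) for skip_word in self.stop_words_ids
        ]
        # After this commit the log line uses the served model name
        # instead of a hard-coded family name.
        logger.info(f"{model_names[0]} 停用词: {self.stop}")

With this change, the same log statement stays correct no matter which alias a worker is started under, and new workers can copy the pattern without editing the string.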
