# taiyi2_chat.py
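# Minimal command-line chat demo for Taiyi2-chat: the model is served through the
# vLLM engine bundled with ms-swift (the `swift.llm` package) and queried in a
# simple input/output loop.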
import torch

torch.cuda.empty_cache()  # free any cached GPU memory before loading the model

from swift.llm import (
    ModelType, get_vllm_engine, get_default_template_type,
    get_template, inference_vllm, VllmGenerationConfig
)
model_path = "../Models/Taiyi2-chat"  # local path to the Taiyi2-chat weights
model_type = ModelType.glm4_9b_chat   # Taiyi2-chat is built on GLM4-9B-Chat

# Build the vLLM engine and the chat template matching this model type.
llm_engine = get_vllm_engine(
    model_type, torch.bfloat16, model_id_or_path=model_path,
    gpu_memory_utilization=0.9, max_model_len=8192,
)
template_type = get_default_template_type(model_type)
template = get_template(template_type, llm_engine.hf_tokenizer)
# Chat: run one turn of inference and return the reply together with the updated history.
def generate(message, history, repetition_penalty=1.05, max_tokens=500, temperature=0.3,
             top_p=0.7, top_k=20):
    request_list = [{'query': message, 'history': history}]
    response = inference_vllm(
        llm_engine,
        template,
        request_list,
        generation_config=VllmGenerationConfig(
            repetition_penalty=repetition_penalty,
            presence_penalty=1.0,  # VllmGenerationConfig expects a float here
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
        )
    )
    return response[0]
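# Batched usage (sketch, assuming the same swift.llm API as above): request_list can
# hold several queries at once, and inference_vllm then returns one result per request,
# e.g.
#
#   requests = [{'query': q, 'history': []} for q in ("你好", "头痛应该怎么缓解?")]
#   results = inference_vllm(llm_engine, template, requests)
#   for r in results:
#       print(r['response'])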
if __name__ == '__main__':
    # Simple terminal chat loop; type END to quit.
    history = []
    while True:
        message = input("Input: ")
        if message == "END":
            print("END!")
            break
        response = generate(message, history)
        print("Output:" + response['response'])
        history = response['history']  # carry the accumulated history into the next turn
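# Illustrative session (actual model output will differ):
#   Input: 你好
#   Output:<model reply>
#   Input: END
#   END!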