|
| 1 | +# Copyright (c) Alibaba, Inc. and its affiliates. |
| 2 | +import os |
| 3 | + |
| 4 | +os.environ['CUDA_VISIBLE_DEVICES'] = '0' |
| 5 | +# os.environ['SWIFT_DEBUG'] = '1' |
| 6 | + |
| 7 | + |
def infer(engine: 'InferEngine', infer_request: 'InferRequest'):
    """Run a two-turn tool-calling round trip (non-streaming) and print each step.

    Turn 1: send the user query and print the model's response / tool_calls.
    Turn 2: append a hard-coded tool result and print the final answer.
    """
    # Stop generation at the observation keyword so the model halts before
    # inventing a tool result (compat with the react_en agent template).
    observation_kw = engine.default_template.agent_template.keyword.observation
    request_config = RequestConfig(max_tokens=512, temperature=0, stop=[observation_kw])

    first = engine.infer([infer_request], request_config)[0]
    message = first.choices[0].message
    print(f"query: {infer_request.messages[0]['content']}")
    print(f'response: {message.content}')
    print(f'tool_calls: {message.tool_calls}')

    # Simulated tool output fed back for the second turn.
    tool = '{"temperature": 32, "condition": "Sunny", "humidity": 50}'
    print(f'tool_response: {tool}')
    infer_request.messages += [{'role': 'assistant', 'content': message.content}, {'role': 'tool', 'content': tool}]
    second = engine.infer([infer_request], request_config)[0]
    print(f'response2: {second.choices[0].message.content}')
| 24 | + |
| 25 | + |
def infer_stream(engine: 'InferEngine', infer_request: 'InferRequest'):
    """Run the same two-turn tool-calling round trip as `infer`, but streaming.

    Fixes over the naive loop:
    - `delta.content` may be None on chunks that only carry tool_calls or
      finish metadata; concatenating it raised TypeError.
    - tool_calls usually arrive on a mid-stream chunk, not the last one, so
      reading `resp` after the loop typically printed None (and raised
      NameError if the stream yielded nothing). Track the last non-None value.
    """
    # Stop at the observation keyword (compat react_en agent template).
    stop = [engine.default_template.agent_template.keyword.observation]
    request_config = RequestConfig(max_tokens=512, temperature=0, stream=True, stop=stop)
    gen_list = engine.infer([infer_request], request_config)
    query = infer_request.messages[0]['content']
    response = ''
    tool_calls = None
    print(f'query: {query}\nresponse: ', end='')
    for resp in gen_list[0]:
        if resp is None:
            continue
        delta = resp.choices[0].delta
        # delta.content is None on tool_calls-only / finish chunks.
        chunk = delta.content or ''
        response += chunk
        print(chunk, end='', flush=True)
        if delta.tool_calls is not None:
            tool_calls = delta.tool_calls
    print()
    print(f'tool_calls: {tool_calls}')

    # Simulated tool output fed back for the second turn.
    tool = '{"temperature": 32, "condition": "Sunny", "humidity": 50}'
    print(f'tool_response: {tool}\nresponse2: ', end='')
    infer_request.messages += [{'role': 'assistant', 'content': response}, {'role': 'tool', 'content': tool}]
    gen_list = engine.infer([infer_request], request_config)
    for resp in gen_list[0]:
        if resp is None:
            continue
        print(resp.choices[0].delta.content or '', end='', flush=True)
    print()
| 51 | + |
| 52 | + |
def get_infer_request():
    """Build a fresh InferRequest: one weather question plus the
    `get_current_weather` tool schema (OpenAI function-calling format)."""
    weather_tool = {
        'name': 'get_current_weather',
        'description': 'Get the current weather in a given location',
        'parameters': {
            'type': 'object',
            'properties': {
                'location': {
                    'type': 'string',
                    'description': 'The city and state, e.g. San Francisco, CA',
                },
                'unit': {
                    'type': 'string',
                    'enum': ['celsius', 'fahrenheit'],
                },
            },
            'required': ['location'],
        },
    }
    user_message = {'role': 'user', 'content': "How's the weather in Beijing today?"}
    return InferRequest(messages=[user_message], tools=[weather_tool])
| 77 | + |
| 78 | + |
if __name__ == '__main__':
    from swift.llm import InferEngine, InferRequest, PtEngine, RequestConfig
    from swift.plugin import agent_templates

    model_id = 'Qwen/Qwen2.5-3B'
    lora_adapters = ['output/vx-xxx/checkpoint-xxx']
    engine = PtEngine(model_id, adapters=lora_adapters, max_batch_size=8)

    # Optionally override the agent template, e.g.:
    # engine.default_template.agent_template = agent_templates['hermes']()  # react_en/qwen_en/qwen_en_parallel

    infer(engine, get_infer_request())
    infer_stream(engine, get_infer_request())