From 2be0599bbc9c24c4b45ab038a0fadabf477b4b44 Mon Sep 17 00:00:00 2001
From: bobo0810 <1055271769@qq.com>
Date: Fri, 16 Jun 2023 16:21:11 +0800
Subject: [PATCH] Text chat added to api.py

---
 README.md    | 27 +++++++++++++++++++++++++++
 README_en.md | 11 +++++++++++
 api.py       |  6 ++++--
 3 files changed, 42 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 76f9752..4602c13 100644
--- a/README.md
+++ b/README.md
@@ -256,6 +256,9 @@ python web_demo_hf.py
 python api.py
 ```
 程序会自动下载 sat 模型，默认部署在本地的 8080 端口，通过 POST 方法进行调用。下面是用`curl`请求的例子，一般而言可以也可以使用代码方法进行POST。
+
+1. 图文对话
+
 ```shell
 echo "{\"image\":\"$(base64 path/to/example.jpg)\",\"text\":\"描述这张图片\",\"history\":[]}" > temp.json
 curl -X POST -H "Content-Type: application/json" -d @temp.json http://127.0.0.1:8080
@@ -270,7 +273,31 @@ curl -X POST -H "Content-Type: application/json" -d @temp.json http://127.0.0.1:
 }
 ```
 
+2. 纯文本对话
+
+```shell
+echo "{\"image\":\"\",\"text\":\"你好\",\"history\":[]}" > temp.json
+curl -X POST -H "Content-Type: application/json" -d @temp.json http://127.0.0.1:8080
+```
+
+得到的返回值为
+
+```
+{
+    "result": "你好！请问有什么需要帮助的吗？",
+    "history": [
+        [
+            "你好",
+            "你好！请问有什么需要帮助的吗？"
+        ]
+    ],
+    "status": 200,
+    "time": "2023-06-16 15:51:08"
+}
+```
+
 我们也提供了使用Huggingface模型的 [api_hf.py](api_hf.py)，用法和sat模型的api一致:
+
 ```shell
 python api_hf.py
 ```
diff --git a/README_en.md b/README_en.md
index 8b47449..0981206 100644
--- a/README_en.md
+++ b/README_en.md
@@ -187,12 +187,23 @@ First, you need to install additional dependencies pip install fastapi uvicorn,
 python api.py
 ```
 The program will automatically download the sat model, and by default it will be deployed on local port 8080 and called through the POST method. Below is an example of a request with curl, but in general you can also use a code method to POST.
+
+1. Image-text chat
+
 ```shell
 echo "{\"image\":\"$(base64 path/to/example.jpg)\",\"text\":\"Describe this picture\",\"history\":[]}" > temp.json
 curl -X POST -H "Content-Type: application/json" -d @temp.json http://127.0.0.1:8080
 ```
 
+2. Text chat
+
+```shell
+echo "{\"image\":\"\",\"text\":\"Hello\",\"history\":[]}" > temp.json
+curl -X POST -H "Content-Type: application/json" -d @temp.json http://127.0.0.1:8080
+```
+
 We also provide an api_hf.py that uses the Huggingface model, which works the same way as the sat model's api:
+
 ```shell
 python api_hf.py
 ```
diff --git a/api.py b/api.py
index 3d15db1..23513d9 100644
--- a/api.py
+++ b/api.py
@@ -29,8 +29,10 @@ async def visual_glm(request: Request):
     input_para.update(request_data)
     is_zh = is_chinese(input_text)
-    input_data = generate_input(input_text, input_image_encoded, history, input_para)
-    input_image, gen_kwargs = input_data['input_image'], input_data['gen_kwargs']
+    image_is_encoded = False if input_image_encoded == "" else True
+    input_data = generate_input(input_text, input_image_encoded, history, input_para, image_is_encoded)
+    input_image = None if input_image_encoded == "" else input_data["input_image"]
+    gen_kwargs = input_data["gen_kwargs"]
     with torch.no_grad():
         answer, history, _ = chat(None, model, tokenizer, input_text, history=history, image=input_image, \
             max_length=gen_kwargs['max_length'], top_p=gen_kwargs['top_p'], \