@@ -1,6 +1,5 @@
 import asyncio
 
-import io
 import os
 from typing import List
 import uuid
@@ -14,7 +13,7 @@
 )
 from gpt_server.utils import STATIC_DIR
 import torch
-from diffusers import QwenImageEditPipeline
+from diffusers import QwenImageEditPlusPipeline
 
 root_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
 
@@ -40,9 +39,8 @@ def __init__(
             conv_template,
             model_type="image",
         )
-        backend = os.environ["backend"]
         self.device = "cuda" if torch.cuda.is_available() else "cpu"
-        self.pipe = QwenImageEditPipeline.from_pretrained(model_path)
+        self.pipe = QwenImageEditPlusPipeline.from_pretrained(model_path)
         self.pipe.to(torch.bfloat16)
         self.pipe.to(self.device)
         self.pipe.set_progress_bar_config(disable=None)
@@ -51,16 +49,18 @@ def __init__(
     async def get_image_output(self, params):
         prompt = params["prompt"]
         response_format = params.get("response_format", "b64_json")
-        bytes_io = await load_base64_or_url(params["image"])
-        image = bytesio2image(bytes_io)
+        image: list = params["image"]
+        image = [bytesio2image(await load_base64_or_url(img)) for img in image]
+        # bytes_io = await load_base64_or_url(params["image"])
+        # image = bytesio2image(bytes_io)
         inputs = {
             "image": image,
             "prompt": prompt,
             "negative_prompt": None,
             "generator": torch.manual_seed(0),
             "true_cfg_scale": 4.0,
             "negative_prompt": " ",
-            "num_inference_steps": 50,
+            "num_inference_steps": 40,
         }
         with torch.inference_mode():
             output = await asyncio.to_thread(self.pipe, **inputs)
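
For reference, a minimal standalone sketch of the call path after this change. This is an illustration, not the worker code: the checkpoint id "Qwen/Qwen-Image-Edit-2509" and the input file names are assumptions, and the pipeline arguments mirror the inputs dict in the diff above.

# Minimal sketch of the new multi-image edit path (assumptions noted above).
import torch
from diffusers import QwenImageEditPlusPipeline
from PIL import Image

# Checkpoint id is an assumption; the worker loads from its own model_path.
pipe = QwenImageEditPlusPipeline.from_pretrained("Qwen/Qwen-Image-Edit-2509")
pipe.to(torch.bfloat16)
pipe.to("cuda" if torch.cuda.is_available() else "cpu")

# The Plus pipeline takes a *list* of reference images, which is why the
# worker now decodes params["image"] element by element.
images = [Image.open("subject.png"), Image.open("background.png")]  # hypothetical files

inputs = {
    "image": images,
    "prompt": "Put the subject into the background scene",
    "generator": torch.manual_seed(0),
    "true_cfg_scale": 4.0,
    "negative_prompt": " ",
    "num_inference_steps": 40,
}
with torch.inference_mode():
    output = pipe(**inputs)
output.images[0].save("edited.png")

The key behavioral difference from QwenImageEditPipeline is that the "image" argument is a list of reference images rather than a single image, matching the per-element decoding added in get_image_output.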