@@ -26,6 +26,33 @@ def tensor2pil(tensor: torch.Tensor) -> Image.Image:
     pil_image = Image.fromarray(np_image)
     return pil_image
 
+def pil_to_bytesio(image, filename="image.png"):
+    image = tensor2pil(image)
+    buffer = BytesIO()
+    image.save(buffer, format="PNG")
+    buffer.seek(0)
+    buffer.name = filename
+    return buffer
+
+def mask_bytesio(mask_tensor, filename="mask_alpha.png"):
+    if mask_tensor == None:
+        return None
+    if mask_tensor.dim() == 3 and mask_tensor.shape[0] == 1:
+        mask_tensor = mask_tensor.squeeze(0)
+
+    mask_np = (mask_tensor.clamp(0, 1).mul(255).byte().cpu().numpy())
+
+    h, w = mask_np.shape
+    rgba_image = Image.new("RGBA", (w, h), (255, 255, 255, 0))
+    alpha_channel = Image.fromarray(mask_np, mode='L')
+    rgba_image.putalpha(alpha_channel)
+
+    buffer = BytesIO()
+    rgba_image.save(buffer, format="PNG")
+    buffer.seek(0)
+    buffer.name = filename
+    return buffer
+
 def lang_list():
     lang_list = ["None"]
     for i in LANGUAGES.items():
@@ -156,19 +183,13 @@ def python_function(self, function, input=None, input2=None, input3=None):
     ]
 }
 
-def dic2list(dic):
-    l = []
-    for i in dic:
-        l += [i]
-    return l
-
 class API_chatbot:
     @classmethod
     def INPUT_TYPES(s):
         return {
             "required": {
-                "chatbot": (dic2list(model_list),),
-                "preset": (dic2list(preset_prompt),),
+                "chatbot": (list(model_list),),
+                "preset": (list(preset_prompt),),
                 "APIkey": ("STRING", {"default": "", "multiline": False, "tooltip": """
 Get API Gemini: https://aistudio.google.com/app/apikey
 Get API OpenAI: https://platform.openai.com/settings/organization/api-keys
@@ -315,12 +336,96 @@ def api_dalle(self, OpenAI_API, size, seed, prompt,translate):
             quality="standard",
             n=1,
         )
-        cls = ALL_NODE["SDVN Load Image Url"]
         image_url = response.data[0].url
         print(image_url)
-        image = cls().load_image_url(image_url)["result"][0]
+        image = ALL_NODE["SDVN Load Image Url"]().load_image_url(image_url)["result"][0]
         return (image,)
 
+class API_GPT_image:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "OpenAI_API": ("STRING", {"default": "", "multiline": False, "tooltip": "Get API: https://platform.openai.com/settings/organization/api-keys"}),
+                "size": (["auto", '1024x1024', '1536x1024', '1024x1536'], {"default": "auto"}),
+                "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "The random seed"}),
+                "prompt": ("STRING", {"default": "", "multiline": True, "placeholder": "Get API: https://platform.openai.com/settings/organization/api-keys"}),
+                "quality": (["auto", "low", "medium", "high"], {"default": "medium",}),
+                "background": (["opaque", "transparent"], {"default": "opaque",}),
+                "n": ("INT", {"default": 1, "min": 1, "max": 4}),
+                "translate": (lang_list(),),
+            },
+            "optional": {
+                "image": ("IMAGE",),
+                "mask": ("MASK",)
+            }
+        }
+
+    CATEGORY = "📂 SDVN/💬 API"
+
+    RETURN_TYPES = ("IMAGE",)
+    INPUT_IS_LIST = True
+    OUTPUT_IS_LIST = (True,)
+    FUNCTION = "API_GPT_image"
+
+    def API_GPT_image(self, OpenAI_API, size, seed, prompt, quality, background, n, translate, image=None, mask=None):
+        OpenAI_API = OpenAI_API[0]
+        size = size[0]
+        seed = seed[0]
+        prompt = prompt[0]
+        quality = quality[0]
+        background = background[0]
+        n = n[0]
+
+        translate = translate[0]
+        if OpenAI_API == "":
+            api_list = api_check()
+            OpenAI_API = api_list["OpenAI"]
+        if "DPRandomGenerator" in ALL_NODE:
+            prompt = ALL_NODE["DPRandomGenerator"]().get_prompt(prompt, seed, 'No')[0]
+        prompt = ALL_NODE["SDVN Translate"]().ggtranslate(prompt, translate)[0]
+
+        client = OpenAI(
+            api_key=OpenAI_API
+        )
+        if image == None:
+            result = client.images.generate(
+                model="gpt-image-1",
+                prompt=prompt,
+                size=size,
+                quality=quality,
+                background=background,
+                moderation="low",
+                n=n
+            )
+        elif mask == None:
+            result = client.images.edit(
+                model="gpt-image-1",
+                prompt=prompt,
+                size=size,
+                quality=quality,
+                image=[pil_to_bytesio(img) for img in image],
+                n=n,
+            )
+        else:
+            result = client.images.edit(
+                model="gpt-image-1",
+                prompt=prompt,
+                size=size,
+                quality=quality,
+                image=pil_to_bytesio(image[0]),
+                n=n,
+                mask=mask_bytesio(mask[0]),
+            )
+        images = []
+        for i in range(n):
+            image_base64 = result.data[i].b64_json
+            image_bytes = base64.b64decode(image_base64)
+            image_pil = Image.open(BytesIO(image_bytes))
+            image_ten = i2tensor(image_pil)
+            images.append(image_ten)
+        return (images,)
+
 class Gemini_Flash2_Image:
     @classmethod
     def INPUT_TYPES(s):
@@ -593,7 +698,8 @@ def joy_caption(s, image, caption_type, caption_length, extra_options, name_inpu
593698 "SDVN IC-Light v2" : ic_light_v2 ,
594699 "SDVN Joy Caption" : joy_caption ,
595700 "SDVN Google Imagen" : API_Imagen ,
596- "SDVN Gemini Flash 2 Image" : Gemini_Flash2_Image ,
701+ "SDVN Gemini Flash 2 Image" : Gemini_Flash2_Image ,
702+ "SDVN GPT Image" : API_GPT_image ,
597703}
598704
599705NODE_DISPLAY_NAME_MAPPINGS = {
@@ -603,5 +709,6 @@ def joy_caption(s, image, caption_type, caption_length, extra_options, name_inpu
603709 "SDVN IC-Light v2" : "✨ IC-Light v2" ,
604710 "SDVN Joy Caption" : "✨ Joy Caption" ,
605711 "SDVN Google Imagen" : "🎨 Google Imagen" ,
606- "SDVN Gemini Flash 2 Image" : "🎨 Gemini Flash 2 Image"
712+ "SDVN Gemini Flash 2 Image" : "🎨 Gemini Flash 2 Image" ,
713+ "SDVN GPT Image" : "🎨 GPT Image"
607714}
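
For reference, a minimal standalone sketch (not part of the commit) of the conversion the new mask_bytesio helper performs: a single-channel torch mask in [0, 1] becomes an in-memory PNG whose alpha channel carries the mask, and the BytesIO buffer is given a name so upload code that sniffs the filename can infer the file type. The mask tensor below is an illustrative stand-in for a ComfyUI MASK input.

# Illustrative sketch only; mirrors mask_bytesio from the diff above.
import torch
from io import BytesIO
from PIL import Image

mask = torch.zeros(64, 64)          # hypothetical [H, W] mask in [0, 1]
mask[16:48, 16:48] = 1.0

mask_np = mask.clamp(0, 1).mul(255).byte().cpu().numpy()
h, w = mask_np.shape
rgba = Image.new("RGBA", (w, h), (255, 255, 255, 0))   # fully transparent canvas
rgba.putalpha(Image.fromarray(mask_np, mode="L"))       # mask drives the alpha channel

buffer = BytesIO()
rgba.save(buffer, format="PNG")
buffer.seek(0)
buffer.name = "mask_alpha.png"      # filename hint for uploaders that sniff the type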