
Commit d8433c6

chore(api-nodes): remove chat widgets from OpenAI/Gemini nodes (comfyanonymous#10861)

1 parent dd41b74 · commit d8433c6

File tree

2 files changed: +11, -112 lines
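Both files drop the same block: after each API call, the node built a render_spec payload and pushed it to the frontend with PromptServer.instance.send_sync("display_component", ...) so a ChatHistoryWidget could show an emulated one-turn chat history (mainly so the frontend could offer a copy button, per the removed comment). This commit deletes that side channel; the nodes now simply return their text (and image) outputs. For orientation, the removed pattern is condensed below into a standalone helper. The function name emit_chat_history is hypothetical, but the message shape and the send_sync call are copied from the deleted lines.

import json
import time
import uuid

from server import PromptServer  # ComfyUI server singleton, imported exactly as in the old code


def emit_chat_history(node_id: str, prompt: str, output_text: str) -> None:
    """Push an emulated single-turn chat history to the frontend (the behavior this commit removes)."""
    render_spec = {
        "node_id": node_id,
        "component": "ChatHistoryWidget",
        "props": {
            # The widget expects a JSON-encoded list of chat turns.
            "history": json.dumps(
                [
                    {
                        "prompt": prompt,
                        "response": output_text,
                        "response_id": str(uuid.uuid4()),
                        "timestamp": time.time(),
                    }
                ]
            ),
        },
    }
    PromptServer.instance.send_sync("display_component", render_spec)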

comfy_api_nodes/nodes_gemini.py

Lines changed: 2 additions & 75 deletions
@@ -4,10 +4,7 @@
 """
 
 import base64
-import json
 import os
-import time
-import uuid
 from enum import Enum
 from io import BytesIO
 from typing import Literal
@@ -43,7 +40,6 @@
     validate_string,
     video_to_base64_string,
 )
-from server import PromptServer
 
 GEMINI_BASE_ENDPOINT = "/proxy/vertexai/gemini"
 GEMINI_MAX_INPUT_FILE_SIZE = 20 * 1024 * 1024  # 20 MB
@@ -384,29 +380,6 @@ async def execute(
         )
 
         output_text = get_text_from_response(response)
-        if output_text:
-            # Not a true chat history like the OpenAI Chat node. It is emulated so the frontend can show a copy button.
-            render_spec = {
-                "node_id": cls.hidden.unique_id,
-                "component": "ChatHistoryWidget",
-                "props": {
-                    "history": json.dumps(
-                        [
-                            {
-                                "prompt": prompt,
-                                "response": output_text,
-                                "response_id": str(uuid.uuid4()),
-                                "timestamp": time.time(),
-                            }
-                        ]
-                    ),
-                },
-            }
-            PromptServer.instance.send_sync(
-                "display_component",
-                render_spec,
-            )
-
         return IO.NodeOutput(output_text or "Empty response from Gemini model...")
 
 

@@ -601,30 +574,7 @@ async def execute(
             response_model=GeminiGenerateContentResponse,
             price_extractor=calculate_tokens_price,
         )
-
-        output_text = get_text_from_response(response)
-        if output_text:
-            render_spec = {
-                "node_id": cls.hidden.unique_id,
-                "component": "ChatHistoryWidget",
-                "props": {
-                    "history": json.dumps(
-                        [
-                            {
-                                "prompt": prompt,
-                                "response": output_text,
-                                "response_id": str(uuid.uuid4()),
-                                "timestamp": time.time(),
-                            }
-                        ]
-                    ),
-                },
-            }
-            PromptServer.instance.send_sync(
-                "display_component",
-                render_spec,
-            )
-        return IO.NodeOutput(get_image_from_response(response), output_text)
+        return IO.NodeOutput(get_image_from_response(response), get_text_from_response(response))
 
 
 class GeminiImage2(IO.ComfyNode):
@@ -744,30 +694,7 @@ async def execute(
             response_model=GeminiGenerateContentResponse,
             price_extractor=calculate_tokens_price,
         )
-
-        output_text = get_text_from_response(response)
-        if output_text:
-            render_spec = {
-                "node_id": cls.hidden.unique_id,
-                "component": "ChatHistoryWidget",
-                "props": {
-                    "history": json.dumps(
-                        [
-                            {
-                                "prompt": prompt,
-                                "response": output_text,
-                                "response_id": str(uuid.uuid4()),
-                                "timestamp": time.time(),
-                            }
-                        ]
-                    ),
-                },
-            }
-            PromptServer.instance.send_sync(
-                "display_component",
-                render_spec,
-            )
-        return IO.NodeOutput(get_image_from_response(response), output_text)
+        return IO.NodeOutput(get_image_from_response(response), get_text_from_response(response))
 
 
 class GeminiExtension(ComfyExtension):

comfy_api_nodes/nodes_openai.py

Lines changed: 9 additions & 37 deletions
@@ -1,15 +1,10 @@
 from io import BytesIO
-from typing import Optional, Union
-import json
 import os
-import time
-import uuid
 from enum import Enum
 from inspect import cleandoc
 import numpy as np
 import torch
 from PIL import Image
-from server import PromptServer
 import folder_paths
 import base64
 from comfy_api.latest import IO, ComfyExtension
@@ -587,11 +582,11 @@ def tensor_to_input_image_content(
     def create_input_message_contents(
         cls,
         prompt: str,
-        image: Optional[torch.Tensor] = None,
-        files: Optional[list[InputFileContent]] = None,
+        image: torch.Tensor | None = None,
+        files: list[InputFileContent] | None = None,
     ) -> InputMessageContentList:
         """Create a list of input message contents from prompt and optional image."""
-        content_list: list[Union[InputContent, InputTextContent, InputImageContent, InputFileContent]] = [
+        content_list: list[InputContent | InputTextContent | InputImageContent | InputFileContent] = [
             InputTextContent(text=prompt, type="input_text"),
         ]
         if image is not None:
@@ -617,9 +612,9 @@ async def execute(
         prompt: str,
         persist_context: bool = False,
         model: SupportedOpenAIModel = SupportedOpenAIModel.gpt_5.value,
-        images: Optional[torch.Tensor] = None,
-        files: Optional[list[InputFileContent]] = None,
-        advanced_options: Optional[CreateModelResponseProperties] = None,
+        images: torch.Tensor | None = None,
+        files: list[InputFileContent] | None = None,
+        advanced_options: CreateModelResponseProperties | None = None,
     ) -> IO.NodeOutput:
         validate_string(prompt, strip_whitespace=False)
 
@@ -660,30 +655,7 @@ async def execute(
             status_extractor=lambda response: response.status,
             completed_statuses=["incomplete", "completed"]
         )
-        output_text = cls.get_text_from_message_content(cls.get_message_content_from_response(result_response))
-
-        # Update history
-        render_spec = {
-            "node_id": cls.hidden.unique_id,
-            "component": "ChatHistoryWidget",
-            "props": {
-                "history": json.dumps(
-                    [
-                        {
-                            "prompt": prompt,
-                            "response": output_text,
-                            "response_id": str(uuid.uuid4()),
-                            "timestamp": time.time(),
-                        }
-                    ]
-                ),
-            },
-        }
-        PromptServer.instance.send_sync(
-            "display_component",
-            render_spec,
-        )
-        return IO.NodeOutput(output_text)
+        return IO.NodeOutput(cls.get_text_from_message_content(cls.get_message_content_from_response(result_response)))
 
 
 class OpenAIInputFiles(IO.ComfyNode):
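The surviving context lines in the hunk above show the polling contract the node relies on: a status_extractor callback maps each response to a status string, and polling stops once that status appears in completed_statuses. A minimal sketch of such a loop, with a caller-supplied fetch() standing in for the actual request machinery (poll_until_done and fetch are hypothetical names, not ComfyUI API):

import time
from typing import Callable, TypeVar

R = TypeVar("R")


def poll_until_done(
    fetch: Callable[[], R],
    status_extractor: Callable[[R], str],
    completed_statuses: list[str],
    interval: float = 1.0,
    max_tries: int = 60,
) -> R:
    """Call fetch() repeatedly until its extracted status is terminal."""
    for _ in range(max_tries):
        response = fetch()
        if status_extractor(response) in completed_statuses:
            return response
        time.sleep(interval)
    raise TimeoutError("operation did not reach a terminal status")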
@@ -790,8 +762,8 @@ def define_schema(cls):
     def execute(
         cls,
         truncation: bool,
-        instructions: Optional[str] = None,
-        max_output_tokens: Optional[int] = None,
+        instructions: str | None = None,
+        max_output_tokens: int | None = None,
     ) -> IO.NodeOutput:
         """
         Configure advanced options for the OpenAI Chat Node.
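The typing edits in this file are mechanical modernization: since Python 3.10, PEP 604 lets X | None replace typing.Optional[X] and A | B replace typing.Union[A, B], which is why the typing import disappears in the first hunk. A quick illustration of the equivalence:

from typing import Optional, Union

# Pre-PEP 604 spelling:
def before(x: Optional[int] = None, y: Union[int, str] = 0) -> Optional[str]:
    return None if x is None else str(x + int(y))

# PEP 604 spelling (Python 3.10+): same semantics, no typing import needed.
def after(x: int | None = None, y: int | str = 0) -> str | None:
    return None if x is None else str(x + int(y))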
