
Commit 8b1054d

chore(api-nodes): remove chat widgets from OpenAI/Gemini nodes

1 parent: f66183a

2 files changed (+11, -112 lines)

comfy_api_nodes/nodes_gemini.py

2 additions & 75 deletions
@@ -4,10 +4,7 @@
 """
 
 import base64
-import json
 import os
-import time
-import uuid
 from enum import Enum
 from io import BytesIO
 from typing import Literal
@@ -41,7 +38,6 @@
     validate_string,
     video_to_base64_string,
 )
-from server import PromptServer
 
 GEMINI_BASE_ENDPOINT = "/proxy/vertexai/gemini"
 GEMINI_MAX_INPUT_FILE_SIZE = 20 * 1024 * 1024  # 20 MB
@@ -363,29 +359,6 @@ async def execute(
         )
 
         output_text = get_text_from_response(response)
-        if output_text:
-            # Not a true chat history like the OpenAI Chat node. It is emulated so the frontend can show a copy button.
-            render_spec = {
-                "node_id": cls.hidden.unique_id,
-                "component": "ChatHistoryWidget",
-                "props": {
-                    "history": json.dumps(
-                        [
-                            {
-                                "prompt": prompt,
-                                "response": output_text,
-                                "response_id": str(uuid.uuid4()),
-                                "timestamp": time.time(),
-                            }
-                        ]
-                    ),
-                },
-            }
-            PromptServer.instance.send_sync(
-                "display_component",
-                render_spec,
-            )
-
         return IO.NodeOutput(output_text or "Empty response from Gemini model...")
 
 
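The block deleted here is removed in near-identical form two more times in this file and once in nodes_openai.py. Reassembled from the hunks, the shared pattern amounts to the sketch below; send_chat_history is a hypothetical name used only for illustration, not a helper that existed in the codebase:

import json
import time
import uuid

from server import PromptServer


def send_chat_history(node_id: str, prompt: str, output_text: str) -> None:
    # Emulate a one-turn chat history so the frontend's ChatHistoryWidget
    # can render the response with a copy button; this is what every
    # removed block did before returning.
    render_spec = {
        "node_id": node_id,
        "component": "ChatHistoryWidget",
        "props": {
            "history": json.dumps(
                [
                    {
                        "prompt": prompt,
                        "response": output_text,
                        "response_id": str(uuid.uuid4()),
                        "timestamp": time.time(),
                    }
                ]
            ),
        },
    }
    PromptServer.instance.send_sync("display_component", render_spec)

After this commit the nodes emit no display_component event at all; the text only leaves the node through IO.NodeOutput.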
@@ -581,30 +554,7 @@ async def execute(
             response_model=GeminiGenerateContentResponse,
             price_extractor=calculate_tokens_price,
         )
-
-        output_text = get_text_from_response(response)
-        if output_text:
-            render_spec = {
-                "node_id": cls.hidden.unique_id,
-                "component": "ChatHistoryWidget",
-                "props": {
-                    "history": json.dumps(
-                        [
-                            {
-                                "prompt": prompt,
-                                "response": output_text,
-                                "response_id": str(uuid.uuid4()),
-                                "timestamp": time.time(),
-                            }
-                        ]
-                    ),
-                },
-            }
-            PromptServer.instance.send_sync(
-                "display_component",
-                render_spec,
-            )
-        return IO.NodeOutput(get_image_from_response(response), output_text)
+        return IO.NodeOutput(get_image_from_response(response), get_text_from_response(response))
 
 
 class GeminiImage2(IO.ComfyNode):
@@ -724,30 +674,7 @@ async def execute(
             response_model=GeminiGenerateContentResponse,
             price_extractor=calculate_tokens_price,
         )
-
-        output_text = get_text_from_response(response)
-        if output_text:
-            render_spec = {
-                "node_id": cls.hidden.unique_id,
-                "component": "ChatHistoryWidget",
-                "props": {
-                    "history": json.dumps(
-                        [
-                            {
-                                "prompt": prompt,
-                                "response": output_text,
-                                "response_id": str(uuid.uuid4()),
-                                "timestamp": time.time(),
-                            }
-                        ]
-                    ),
-                },
-            }
-            PromptServer.instance.send_sync(
-                "display_component",
-                render_spec,
-            )
-        return IO.NodeOutput(get_image_from_response(response), output_text)
+        return IO.NodeOutput(get_image_from_response(response), get_text_from_response(response))
 
 
 class GeminiExtension(ComfyExtension):
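
One behavioral detail worth noting: the text-only Gemini node above substitutes a placeholder when the model returns no text, while the two image nodes now pass get_text_from_response(response) through unchanged, so their text output can be empty. A minimal sketch of the placeholder logic (text_or_placeholder is an illustrative name, not a function from the codebase):

def text_or_placeholder(output_text: str | None) -> str:
    # Mirror the text node's return: fall back to a placeholder when the
    # response text is empty or missing.
    return output_text or "Empty response from Gemini model..."


assert text_or_placeholder(None) == "Empty response from Gemini model..."
assert text_or_placeholder("") == "Empty response from Gemini model..."
assert text_or_placeholder("hello") == "hello"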

comfy_api_nodes/nodes_openai.py

9 additions & 37 deletions
@@ -1,15 +1,10 @@
 from io import BytesIO
-from typing import Optional, Union
-import json
 import os
-import time
-import uuid
 from enum import Enum
 from inspect import cleandoc
 import numpy as np
 import torch
 from PIL import Image
-from server import PromptServer
 import folder_paths
 import base64
 from comfy_api.latest import IO, ComfyExtension
@@ -587,11 +582,11 @@ def tensor_to_input_image_content(
     def create_input_message_contents(
         cls,
         prompt: str,
-        image: Optional[torch.Tensor] = None,
-        files: Optional[list[InputFileContent]] = None,
+        image: torch.Tensor | None = None,
+        files: list[InputFileContent] | None = None,
     ) -> InputMessageContentList:
         """Create a list of input message contents from prompt and optional image."""
-        content_list: list[Union[InputContent, InputTextContent, InputImageContent, InputFileContent]] = [
+        content_list: list[InputContent | InputTextContent | InputImageContent | InputFileContent] = [
             InputTextContent(text=prompt, type="input_text"),
         ]
         if image is not None:
@@ -617,9 +612,9 @@ async def execute(
         prompt: str,
         persist_context: bool = False,
         model: SupportedOpenAIModel = SupportedOpenAIModel.gpt_5.value,
-        images: Optional[torch.Tensor] = None,
-        files: Optional[list[InputFileContent]] = None,
-        advanced_options: Optional[CreateModelResponseProperties] = None,
+        images: torch.Tensor | None = None,
+        files: list[InputFileContent] | None = None,
+        advanced_options: CreateModelResponseProperties | None = None,
     ) -> IO.NodeOutput:
         validate_string(prompt, strip_whitespace=False)
 
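The annotation changes in these hunks (and in the last hunk below) are pure syntax modernization: PEP 604 (Python 3.10+) allows X | None in place of typing.Optional[X] and A | B in place of typing.Union[A, B], which is why the typing import disappears entirely. A minimal standalone illustration (pick is a throwaway example name):

def pick(value: int | None = None) -> int | str:
    # "int | None" is the PEP 604 spelling of typing.Optional[int];
    # the return annotation "int | str" is the spelling of typing.Union[int, str].
    return "empty" if value is None else value


assert pick() == "empty"
assert pick(3) == 3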
@@ -660,30 +655,7 @@ async def execute(
             status_extractor=lambda response: response.status,
             completed_statuses=["incomplete", "completed"]
         )
-        output_text = cls.get_text_from_message_content(cls.get_message_content_from_response(result_response))
-
-        # Update history
-        render_spec = {
-            "node_id": cls.hidden.unique_id,
-            "component": "ChatHistoryWidget",
-            "props": {
-                "history": json.dumps(
-                    [
-                        {
-                            "prompt": prompt,
-                            "response": output_text,
-                            "response_id": str(uuid.uuid4()),
-                            "timestamp": time.time(),
-                        }
-                    ]
-                ),
-            },
-        }
-        PromptServer.instance.send_sync(
-            "display_component",
-            render_spec,
-        )
-        return IO.NodeOutput(output_text)
+        return IO.NodeOutput(cls.get_text_from_message_content(cls.get_message_content_from_response(result_response)))
 
 
 class OpenAIInputFiles(IO.ComfyNode):
@@ -790,8 +762,8 @@ def define_schema(cls):
     def execute(
         cls,
         truncation: bool,
-        instructions: Optional[str] = None,
-        max_output_tokens: Optional[int] = None,
+        instructions: str | None = None,
+        max_output_tokens: int | None = None,
     ) -> IO.NodeOutput:
         """
         Configure advanced options for the OpenAI Chat Node.