
Commit 1a59686

Merge branch 'comfyanonymous:master' into offloader-maifee
2 parents 6d96d26 + 5c7b08c commit 1a59686

File tree

3 files changed: +143 −125 lines changed


comfy_api_nodes/apis/bfl_api.py

Lines changed: 26 additions & 2 deletions
@@ -70,6 +70,29 @@ class BFLFluxProGenerateRequest(BaseModel):
     # )
 
 
+class Flux2ProGenerateRequest(BaseModel):
+    prompt: str = Field(...)
+    width: int = Field(1024, description="Must be a multiple of 32.")
+    height: int = Field(768, description="Must be a multiple of 32.")
+    seed: int | None = Field(None)
+    prompt_upsampling: bool | None = Field(None)
+    input_image: str | None = Field(None, description="Base64 encoded image for image-to-image generation")
+    input_image_2: str | None = Field(None, description="Base64 encoded image for image-to-image generation")
+    input_image_3: str | None = Field(None, description="Base64 encoded image for image-to-image generation")
+    input_image_4: str | None = Field(None, description="Base64 encoded image for image-to-image generation")
+    input_image_5: str | None = Field(None, description="Base64 encoded image for image-to-image generation")
+    input_image_6: str | None = Field(None, description="Base64 encoded image for image-to-image generation")
+    input_image_7: str | None = Field(None, description="Base64 encoded image for image-to-image generation")
+    input_image_8: str | None = Field(None, description="Base64 encoded image for image-to-image generation")
+    input_image_9: str | None = Field(None, description="Base64 encoded image for image-to-image generation")
+    safety_tolerance: int | None = Field(
+        5, description="Tolerance level for input and output moderation. Value 0 being most strict.", ge=0, le=5
+    )
+    output_format: str | None = Field(
+        "png", description="Output format for the generated image. Can be 'jpeg' or 'png'."
+    )
+
+
 class BFLFluxKontextProGenerateRequest(BaseModel):
     prompt: str = Field(..., description='The text prompt for what you wannt to edit.')
     input_image: Optional[str] = Field(None, description='Image to edit in base64 format')
@@ -109,8 +132,9 @@ class BFLFluxProUltraGenerateRequest(BaseModel):
 
 
 class BFLFluxProGenerateResponse(BaseModel):
-    id: str = Field(..., description='The unique identifier for the generation task.')
-    polling_url: str = Field(..., description='URL to poll for the generation result.')
+    id: str = Field(..., description="The unique identifier for the generation task.")
+    polling_url: str = Field(..., description="URL to poll for the generation result.")
+    cost: float | None = Field(None, description="Price in cents")
 
 
 class BFLStatus(str, Enum):
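
For orientation, a minimal sketch of how the new Flux2ProGenerateRequest might be populated and serialized outside the node graph. Only the model and its fields come from this commit; the encode_image helper, file names, and payload handling below are illustrative assumptions, not part of the change.

```python
# Illustrative sketch only: builds a Flux2ProGenerateRequest with two reference
# images and dumps it to a JSON-ready dict. encode_image is a hypothetical
# stand-in for however the caller produces base64 image strings.
import base64

from comfy_api_nodes.apis.bfl_api import Flux2ProGenerateRequest


def encode_image(path: str) -> str:
    # Hypothetical helper: read raw bytes and base64-encode them.
    with open(path, "rb") as f:
        return base64.b64encode(f.read()).decode("utf-8")


request = Flux2ProGenerateRequest(
    prompt="A red fox in fresh snow, golden hour",
    width=1024,   # must be a multiple of 32
    height=768,   # must be a multiple of 32
    seed=42,
    input_image=encode_image("ref_1.png"),
    input_image_2=encode_image("ref_2.png"),
)

# exclude_none keeps the unused input_image_* slots out of the request body.
payload = request.model_dump(exclude_none=True)
```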

comfy_api_nodes/nodes_bfl.py

Lines changed: 115 additions & 123 deletions
@@ -1,23 +1,24 @@
 from inspect import cleandoc
-from typing import Optional
 
 import torch
+from pydantic import BaseModel
 from typing_extensions import override
 
 from comfy_api.latest import IO, ComfyExtension
 from comfy_api_nodes.apis.bfl_api import (
     BFLFluxExpandImageRequest,
     BFLFluxFillImageRequest,
     BFLFluxKontextProGenerateRequest,
-    BFLFluxProGenerateRequest,
     BFLFluxProGenerateResponse,
     BFLFluxProUltraGenerateRequest,
     BFLFluxStatusResponse,
     BFLStatus,
+    Flux2ProGenerateRequest,
 )
 from comfy_api_nodes.util import (
     ApiEndpoint,
     download_url_to_image_tensor,
+    get_number_of_images,
     poll_op,
     resize_mask_to_image,
     sync_op,
@@ -116,7 +117,7 @@ async def execute(
         prompt_upsampling: bool = False,
         raw: bool = False,
         seed: int = 0,
-        image_prompt: Optional[torch.Tensor] = None,
+        image_prompt: torch.Tensor | None = None,
         image_prompt_strength: float = 0.1,
     ) -> IO.NodeOutput:
         if image_prompt is None:
@@ -230,7 +231,7 @@ async def execute(
         aspect_ratio: str,
         guidance: float,
         steps: int,
-        input_image: Optional[torch.Tensor] = None,
+        input_image: torch.Tensor | None = None,
         seed=0,
         prompt_upsampling=False,
     ) -> IO.NodeOutput:
@@ -280,124 +281,6 @@ class FluxKontextMaxImageNode(FluxKontextProImageNode):
     DISPLAY_NAME = "Flux.1 Kontext [max] Image"
 
 
-class FluxProImageNode(IO.ComfyNode):
-    """
-    Generates images synchronously based on prompt and resolution.
-    """
-
-    @classmethod
-    def define_schema(cls) -> IO.Schema:
-        return IO.Schema(
-            node_id="FluxProImageNode",
-            display_name="Flux 1.1 [pro] Image",
-            category="api node/image/BFL",
-            description=cleandoc(cls.__doc__ or ""),
-            inputs=[
-                IO.String.Input(
-                    "prompt",
-                    multiline=True,
-                    default="",
-                    tooltip="Prompt for the image generation",
-                ),
-                IO.Boolean.Input(
-                    "prompt_upsampling",
-                    default=False,
-                    tooltip="Whether to perform upsampling on the prompt. "
-                    "If active, automatically modifies the prompt for more creative generation, "
-                    "but results are nondeterministic (same seed will not produce exactly the same result).",
-                ),
-                IO.Int.Input(
-                    "width",
-                    default=1024,
-                    min=256,
-                    max=1440,
-                    step=32,
-                ),
-                IO.Int.Input(
-                    "height",
-                    default=768,
-                    min=256,
-                    max=1440,
-                    step=32,
-                ),
-                IO.Int.Input(
-                    "seed",
-                    default=0,
-                    min=0,
-                    max=0xFFFFFFFFFFFFFFFF,
-                    control_after_generate=True,
-                    tooltip="The random seed used for creating the noise.",
-                ),
-                IO.Image.Input(
-                    "image_prompt",
-                    optional=True,
-                ),
-                # "image_prompt_strength": (
-                #     IO.FLOAT,
-                #     {
-                #         "default": 0.1,
-                #         "min": 0.0,
-                #         "max": 1.0,
-                #         "step": 0.01,
-                #         "tooltip": "Blend between the prompt and the image prompt.",
-                #     },
-                # ),
-            ],
-            outputs=[IO.Image.Output()],
-            hidden=[
-                IO.Hidden.auth_token_comfy_org,
-                IO.Hidden.api_key_comfy_org,
-                IO.Hidden.unique_id,
-            ],
-            is_api_node=True,
-        )
-
-    @classmethod
-    async def execute(
-        cls,
-        prompt: str,
-        prompt_upsampling,
-        width: int,
-        height: int,
-        seed=0,
-        image_prompt=None,
-        # image_prompt_strength=0.1,
-    ) -> IO.NodeOutput:
-        image_prompt = image_prompt if image_prompt is None else tensor_to_base64_string(image_prompt)
-        initial_response = await sync_op(
-            cls,
-            ApiEndpoint(
-                path="/proxy/bfl/flux-pro-1.1/generate",
-                method="POST",
-            ),
-            response_model=BFLFluxProGenerateResponse,
-            data=BFLFluxProGenerateRequest(
-                prompt=prompt,
-                prompt_upsampling=prompt_upsampling,
-                width=width,
-                height=height,
-                seed=seed,
-                image_prompt=image_prompt,
-            ),
-        )
-        response = await poll_op(
-            cls,
-            ApiEndpoint(initial_response.polling_url),
-            response_model=BFLFluxStatusResponse,
-            status_extractor=lambda r: r.status,
-            progress_extractor=lambda r: r.progress,
-            completed_statuses=[BFLStatus.ready],
-            failed_statuses=[
-                BFLStatus.request_moderated,
-                BFLStatus.content_moderated,
-                BFLStatus.error,
-                BFLStatus.task_not_found,
-            ],
-            queued_statuses=[],
-        )
-        return IO.NodeOutput(await download_url_to_image_tensor(response.result["sample"]))
-
-
 class FluxProExpandNode(IO.ComfyNode):
     """
     Outpaints image based on prompt.
@@ -640,16 +523,125 @@ async def execute(
         return IO.NodeOutput(await download_url_to_image_tensor(response.result["sample"]))
 
 
+class Flux2ProImageNode(IO.ComfyNode):
+
+    @classmethod
+    def define_schema(cls) -> IO.Schema:
+        return IO.Schema(
+            node_id="Flux2ProImageNode",
+            display_name="Flux.2 [pro] Image",
+            category="api node/image/BFL",
+            description="Generates images synchronously based on prompt and resolution.",
+            inputs=[
+                IO.String.Input(
+                    "prompt",
+                    multiline=True,
+                    default="",
+                    tooltip="Prompt for the image generation or edit",
+                ),
+                IO.Int.Input(
+                    "width",
+                    default=1024,
+                    min=256,
+                    max=2048,
+                    step=32,
+                ),
+                IO.Int.Input(
+                    "height",
+                    default=768,
+                    min=256,
+                    max=2048,
+                    step=32,
+                ),
+                IO.Int.Input(
+                    "seed",
+                    default=0,
+                    min=0,
+                    max=0xFFFFFFFFFFFFFFFF,
+                    control_after_generate=True,
+                    tooltip="The random seed used for creating the noise.",
+                ),
+                IO.Boolean.Input(
+                    "prompt_upsampling",
+                    default=False,
+                    tooltip="Whether to perform upsampling on the prompt. "
+                    "If active, automatically modifies the prompt for more creative generation, "
+                    "but results are nondeterministic (same seed will not produce exactly the same result).",
+                ),
+                IO.Image.Input("images", optional=True, tooltip="Up to 4 images to be used as references."),
+            ],
+            outputs=[IO.Image.Output()],
+            hidden=[
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
+            ],
+            is_api_node=True,
+        )
+
+    @classmethod
+    async def execute(
+        cls,
+        prompt: str,
+        width: int,
+        height: int,
+        seed: int,
+        prompt_upsampling: bool,
+        images: torch.Tensor | None = None,
+    ) -> IO.NodeOutput:
+        reference_images = {}
+        if images is not None:
+            if get_number_of_images(images) > 9:
+                raise ValueError("The current maximum number of supported images is 9.")
+            for image_index in range(images.shape[0]):
+                key_name = f"input_image_{image_index + 1}" if image_index else "input_image"
+                reference_images[key_name] = tensor_to_base64_string(images[image_index], total_pixels=2048 * 2048)
+        initial_response = await sync_op(
+            cls,
+            ApiEndpoint(path="/proxy/bfl/flux-2-pro/generate", method="POST"),
+            response_model=BFLFluxProGenerateResponse,
+            data=Flux2ProGenerateRequest(
+                prompt=prompt,
+                width=width,
+                height=height,
+                seed=seed,
+                prompt_upsampling=prompt_upsampling,
+                **reference_images,
+            ),
+        )
+
+        def price_extractor(_r: BaseModel) -> float | None:
+            return None if initial_response.cost is None else initial_response.cost / 100
+
+        response = await poll_op(
+            cls,
+            ApiEndpoint(initial_response.polling_url),
+            response_model=BFLFluxStatusResponse,
+            status_extractor=lambda r: r.status,
+            progress_extractor=lambda r: r.progress,
+            price_extractor=price_extractor,
+            completed_statuses=[BFLStatus.ready],
+            failed_statuses=[
+                BFLStatus.request_moderated,
+                BFLStatus.content_moderated,
+                BFLStatus.error,
+                BFLStatus.task_not_found,
+            ],
+            queued_statuses=[],
+        )
+        return IO.NodeOutput(await download_url_to_image_tensor(response.result["sample"]))
+
+
 class BFLExtension(ComfyExtension):
     @override
     async def get_node_list(self) -> list[type[IO.ComfyNode]]:
         return [
             FluxProUltraImageNode,
-            # FluxProImageNode,
             FluxKontextProImageNode,
             FluxKontextMaxImageNode,
             FluxProExpandNode,
             FluxProFillNode,
+            Flux2ProImageNode,
         ]
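
Two small pieces of logic in Flux2ProImageNode.execute are easy to miss: the field naming for reference images (the first image goes to input_image, later ones to input_image_2 through input_image_9) and the price handling (BFLFluxProGenerateResponse.cost is documented as cents, and poll_op's price_extractor is fed cost / 100, presumably a dollar amount). The sketch below restates that logic in isolation; the batch size and cost values are made-up examples, not data from the commit.

```python
# Restates the reference-image key mapping and price conversion from
# Flux2ProImageNode.execute; inputs here are hypothetical.

def reference_image_keys(batch_size: int) -> list[str]:
    # First image -> "input_image", later images -> "input_image_2" ... "input_image_9",
    # matching the fields defined on Flux2ProGenerateRequest.
    if batch_size > 9:
        raise ValueError("The current maximum number of supported images is 9.")
    return [
        f"input_image_{i + 1}" if i else "input_image"
        for i in range(batch_size)
    ]


def cents_to_units(cost: float | None) -> float | None:
    # cost is reported in cents; the node divides by 100 before handing it
    # to poll_op's price_extractor.
    return None if cost is None else cost / 100


assert reference_image_keys(3) == ["input_image", "input_image_2", "input_image_3"]
assert cents_to_units(450) == 4.5
```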

comfy_api_nodes/util/__init__.py

Lines changed: 2 additions & 0 deletions
@@ -36,6 +36,7 @@
     upload_video_to_comfyapi,
 )
 from .validation_utils import (
+    get_image_dimensions,
     get_number_of_images,
     validate_aspect_ratio_string,
     validate_audio_duration,
@@ -82,6 +83,7 @@
     "trim_video",
     "video_to_base64_string",
     # Validation utilities
+    "get_image_dimensions",
     "get_number_of_images",
     "validate_aspect_ratio_string",
     "validate_audio_duration",
