|
1 | 1 | from inspect import cleandoc |
2 | | -from typing import Optional |
3 | 2 |
|
4 | 3 | import torch |
| 4 | +from pydantic import BaseModel |
5 | 5 | from typing_extensions import override |
6 | 6 |
|
7 | 7 | from comfy_api.latest import IO, ComfyExtension |
8 | 8 | from comfy_api_nodes.apis.bfl_api import ( |
9 | 9 | BFLFluxExpandImageRequest, |
10 | 10 | BFLFluxFillImageRequest, |
11 | 11 | BFLFluxKontextProGenerateRequest, |
12 | | - BFLFluxProGenerateRequest, |
13 | 12 | BFLFluxProGenerateResponse, |
14 | 13 | BFLFluxProUltraGenerateRequest, |
15 | 14 | BFLFluxStatusResponse, |
16 | 15 | BFLStatus, |
| 16 | + Flux2ProGenerateRequest, |
17 | 17 | ) |
18 | 18 | from comfy_api_nodes.util import ( |
19 | 19 | ApiEndpoint, |
20 | 20 | download_url_to_image_tensor, |
| 21 | + get_number_of_images, |
21 | 22 | poll_op, |
22 | 23 | resize_mask_to_image, |
23 | 24 | sync_op, |
@@ -116,7 +117,7 @@ async def execute( |
116 | 117 | prompt_upsampling: bool = False, |
117 | 118 | raw: bool = False, |
118 | 119 | seed: int = 0, |
119 | | - image_prompt: Optional[torch.Tensor] = None, |
| 120 | + image_prompt: torch.Tensor | None = None, |
120 | 121 | image_prompt_strength: float = 0.1, |
121 | 122 | ) -> IO.NodeOutput: |
122 | 123 | if image_prompt is None: |
@@ -230,7 +231,7 @@ async def execute( |
230 | 231 | aspect_ratio: str, |
231 | 232 | guidance: float, |
232 | 233 | steps: int, |
233 | | - input_image: Optional[torch.Tensor] = None, |
| 234 | + input_image: torch.Tensor | None = None, |
234 | 235 | seed=0, |
235 | 236 | prompt_upsampling=False, |
236 | 237 | ) -> IO.NodeOutput: |
@@ -280,124 +281,6 @@ class FluxKontextMaxImageNode(FluxKontextProImageNode): |
280 | 281 | DISPLAY_NAME = "Flux.1 Kontext [max] Image" |
281 | 282 |
|
282 | 283 |
|
class FluxProImageNode(IO.ComfyNode):
    """
    Generates images synchronously based on prompt and resolution.
    """

    @classmethod
    def define_schema(cls) -> IO.Schema:
        return IO.Schema(
            node_id="FluxProImageNode",
            display_name="Flux 1.1 [pro] Image",
            category="api node/image/BFL",
            description=cleandoc(cls.__doc__ or ""),
            inputs=[
                IO.String.Input(
                    "prompt",
                    multiline=True,
                    default="",
                    tooltip="Prompt for the image generation",
                ),
                IO.Boolean.Input(
                    "prompt_upsampling",
                    default=False,
                    tooltip="Whether to perform upsampling on the prompt. "
                    "If active, automatically modifies the prompt for more creative generation, "
                    "but results are nondeterministic (same seed will not produce exactly the same result).",
                ),
                IO.Int.Input(
                    "width",
                    default=1024,
                    min=256,
                    max=1440,
                    step=32,
                ),
                IO.Int.Input(
                    "height",
                    default=768,
                    min=256,
                    max=1440,
                    step=32,
                ),
                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=0xFFFFFFFFFFFFFFFF,
                    control_after_generate=True,
                    tooltip="The random seed used for creating the noise.",
                ),
                IO.Image.Input(
                    "image_prompt",
                    optional=True,
                ),
            ],
            outputs=[IO.Image.Output()],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
        )

    @classmethod
    async def execute(
        cls,
        prompt: str,
        prompt_upsampling: bool,
        width: int,
        height: int,
        seed: int = 0,
        image_prompt: torch.Tensor | None = None,
    ) -> IO.NodeOutput:
        """Submit the generation request to BFL, poll until ready, and return the image.

        Raises on moderation/error/not-found statuses via poll_op's failed_statuses handling.
        """
        # The API expects the optional image prompt as a base64 string, not a tensor.
        encoded_image_prompt = None if image_prompt is None else tensor_to_base64_string(image_prompt)
        initial_response = await sync_op(
            cls,
            ApiEndpoint(
                path="/proxy/bfl/flux-pro-1.1/generate",
                method="POST",
            ),
            response_model=BFLFluxProGenerateResponse,
            data=BFLFluxProGenerateRequest(
                prompt=prompt,
                prompt_upsampling=prompt_upsampling,
                width=width,
                height=height,
                seed=seed,
                image_prompt=encoded_image_prompt,
            ),
        )
        # Poll the BFL task until it reaches a terminal status.
        response = await poll_op(
            cls,
            ApiEndpoint(initial_response.polling_url),
            response_model=BFLFluxStatusResponse,
            status_extractor=lambda r: r.status,
            progress_extractor=lambda r: r.progress,
            completed_statuses=[BFLStatus.ready],
            failed_statuses=[
                BFLStatus.request_moderated,
                BFLStatus.content_moderated,
                BFLStatus.error,
                BFLStatus.task_not_found,
            ],
            queued_statuses=[],
        )
        return IO.NodeOutput(await download_url_to_image_tensor(response.result["sample"]))
400 | | - |
401 | 284 | class FluxProExpandNode(IO.ComfyNode): |
402 | 285 | """ |
403 | 286 | Outpaints image based on prompt. |
@@ -640,16 +523,125 @@ async def execute( |
640 | 523 | return IO.NodeOutput(await download_url_to_image_tensor(response.result["sample"])) |
641 | 524 |
|
642 | 525 |
|
class Flux2ProImageNode(IO.ComfyNode):
    """
    Generates images synchronously with the BFL Flux.2 [pro] endpoint,
    optionally conditioned on reference images.
    """

    @classmethod
    def define_schema(cls) -> IO.Schema:
        return IO.Schema(
            node_id="Flux2ProImageNode",
            display_name="Flux.2 [pro] Image",
            category="api node/image/BFL",
            description="Generates images synchronously based on prompt and resolution.",
            inputs=[
                IO.String.Input(
                    "prompt",
                    multiline=True,
                    default="",
                    tooltip="Prompt for the image generation or edit",
                ),
                IO.Int.Input(
                    "width",
                    default=1024,
                    min=256,
                    max=2048,
                    step=32,
                ),
                IO.Int.Input(
                    "height",
                    default=768,
                    min=256,
                    max=2048,
                    step=32,
                ),
                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=0xFFFFFFFFFFFFFFFF,
                    control_after_generate=True,
                    tooltip="The random seed used for creating the noise.",
                ),
                IO.Boolean.Input(
                    "prompt_upsampling",
                    default=False,
                    tooltip="Whether to perform upsampling on the prompt. "
                    "If active, automatically modifies the prompt for more creative generation, "
                    "but results are nondeterministic (same seed will not produce exactly the same result).",
                ),
                # Tooltip kept in sync with the limit enforced in execute() (9, matching
                # the input_image .. input_image_9 request fields).
                IO.Image.Input("images", optional=True, tooltip="Up to 9 images to be used as references."),
            ],
            outputs=[IO.Image.Output()],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
        )

    @classmethod
    async def execute(
        cls,
        prompt: str,
        width: int,
        height: int,
        seed: int,
        prompt_upsampling: bool,
        images: torch.Tensor | None = None,
    ) -> IO.NodeOutput:
        """Submit the Flux.2 [pro] request, poll until ready, and return the image.

        Raises:
            ValueError: if more than 9 reference images are provided.
        """
        reference_images = {}
        if images is not None:
            # Use the same count for validation and iteration so they cannot drift.
            image_count = get_number_of_images(images)
            if image_count > 9:
                raise ValueError("The current maximum number of supported images is 9.")
            for image_index in range(image_count):
                # API field names: "input_image" for the first, then "input_image_2" .. "input_image_9".
                key_name = f"input_image_{image_index + 1}" if image_index else "input_image"
                reference_images[key_name] = tensor_to_base64_string(images[image_index], total_pixels=2048 * 2048)
        initial_response = await sync_op(
            cls,
            ApiEndpoint(path="/proxy/bfl/flux-2-pro/generate", method="POST"),
            response_model=BFLFluxProGenerateResponse,
            data=Flux2ProGenerateRequest(
                prompt=prompt,
                width=width,
                height=height,
                seed=seed,
                prompt_upsampling=prompt_upsampling,
                **reference_images,
            ),
        )

        def price_extractor(_r: BaseModel) -> float | None:
            # Cost is reported (in cents) on the initial response, not on poll responses.
            return None if initial_response.cost is None else initial_response.cost / 100

        response = await poll_op(
            cls,
            ApiEndpoint(initial_response.polling_url),
            response_model=BFLFluxStatusResponse,
            status_extractor=lambda r: r.status,
            progress_extractor=lambda r: r.progress,
            price_extractor=price_extractor,
            completed_statuses=[BFLStatus.ready],
            failed_statuses=[
                BFLStatus.request_moderated,
                BFLStatus.content_moderated,
                BFLStatus.error,
                BFLStatus.task_not_found,
            ],
            queued_statuses=[],
        )
        return IO.NodeOutput(await download_url_to_image_tensor(response.result["sample"]))
| 634 | + |
class BFLExtension(ComfyExtension):
    """Extension that registers the Black Forest Labs (BFL) API nodes."""

    @override
    async def get_node_list(self) -> list[type[IO.ComfyNode]]:
        """Return the BFL node classes exposed by this extension."""
        node_classes: list[type[IO.ComfyNode]] = [
            FluxProUltraImageNode,
            FluxKontextProImageNode,
            FluxKontextMaxImageNode,
            FluxProExpandNode,
            FluxProFillNode,
            Flux2ProImageNode,
        ]
        return node_classes
654 | 646 |
|
655 | 647 |
|
|
0 commit comments