Commit 52e778f

feat(Kling-API-Nodes): add v2-5-turbo model to FirstLastFrame node (#10938)
1 parent 9d8a817 commit 52e778f

File tree

1 file changed: +26 −34 lines

comfy_api_nodes/nodes_kling.py

Lines changed: 26 additions & 34 deletions
@@ -4,8 +4,6 @@
 - [Compatibility Table](https://app.klingai.com/global/dev/document-api/apiReference/model/skillsMap)
 """
 
-from __future__ import annotations
-from typing import Optional, TypeVar
 import math
 import logging
 
@@ -66,9 +64,7 @@
     poll_op,
 )
 from comfy_api.input_impl import VideoFromFile
-from comfy_api.input.basic_types import AudioInput
-from comfy_api.input.video_types import VideoInput
-from comfy_api.latest import ComfyExtension, IO
+from comfy_api.latest import ComfyExtension, IO, Input
 
 KLING_API_VERSION = "v1"
 PATH_TEXT_TO_VIDEO = f"/proxy/kling/{KLING_API_VERSION}/videos/text2video"
@@ -94,8 +90,6 @@
 AVERAGE_DURATION_VIDEO_EFFECTS = 320
 AVERAGE_DURATION_VIDEO_EXTEND = 320
 
-R = TypeVar("R")
-
 
 MODE_TEXT2VIDEO = {
     "standard mode / 5s duration / kling-v1": ("std", "5", "kling-v1"),
@@ -130,6 +124,8 @@
     "pro mode / 10s duration / kling-v1-6": ("pro", "10", "kling-v1-6"),
     "pro mode / 5s duration / kling-v2-1": ("pro", "5", "kling-v2-1"),
     "pro mode / 10s duration / kling-v2-1": ("pro", "10", "kling-v2-1"),
+    "pro mode / 5s duration / kling-v2-5-turbo": ("pro", "5", "kling-v2-5-turbo"),
+    "pro mode / 10s duration / kling-v2-5-turbo": ("pro", "10", "kling-v2-5-turbo"),
 }
 """
 Returns a mapping of mode strings to their corresponding (mode, duration, model_name) tuples.
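
For context on the new entries: the combo string selected in the node UI is used as a key into this mapping and unpacked into the (mode, duration, model_name) triple sent to the Kling API. A minimal sketch of that lookup, using only keys visible in this hunk (the dict is abbreviated and named mode_map here for illustration; the real module defines the full mapping):

# Illustrative only: trimmed copy of the mapping touched in this hunk.
mode_map = {
    "pro mode / 5s duration / kling-v2-5-turbo": ("pro", "5", "kling-v2-5-turbo"),
    "pro mode / 10s duration / kling-v2-5-turbo": ("pro", "10", "kling-v2-5-turbo"),
}

mode, duration, model_name = mode_map["pro mode / 10s duration / kling-v2-5-turbo"]
print(mode, duration, model_name)  # pro 10 kling-v2-5-turbo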
@@ -296,7 +292,7 @@ def get_video_from_response(response) -> KlingVideoResult:
     return video
 
 
-def get_video_url_from_response(response) -> Optional[str]:
+def get_video_url_from_response(response) -> str | None:
     """Returns the first video url from the Kling video generation task result.
     Will not raise an error if the response is not valid.
     """
@@ -315,7 +311,7 @@ def get_images_from_response(response) -> list[KlingImageResult]:
     return images
 
 
-def get_images_urls_from_response(response) -> Optional[str]:
+def get_images_urls_from_response(response) -> str | None:
     """Returns the list of image urls from the Kling image generation task result.
     Will not raise an error if the response is not valid. If there is only one image, returns the url as a string. If there are multiple images, returns a list of urls.
     """
@@ -349,7 +345,7 @@ async def execute_text2video(
     model_mode: str,
     duration: str,
     aspect_ratio: str,
-    camera_control: Optional[KlingCameraControl] = None,
+    camera_control: KlingCameraControl | None = None,
 ) -> IO.NodeOutput:
     validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_T2V)
     task_creation_response = await sync_op(
@@ -394,8 +390,8 @@ async def execute_image2video(
     model_mode: str,
     aspect_ratio: str,
     duration: str,
-    camera_control: Optional[KlingCameraControl] = None,
-    end_frame: Optional[torch.Tensor] = None,
+    camera_control: KlingCameraControl | None = None,
+    end_frame: torch.Tensor | None = None,
 ) -> IO.NodeOutput:
     validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_I2V)
     validate_input_image(start_frame)
@@ -451,8 +447,8 @@ async def execute_video_effect(
     model_name: str,
     duration: KlingVideoGenDuration,
     image_1: torch.Tensor,
-    image_2: Optional[torch.Tensor] = None,
-    model_mode: Optional[KlingVideoGenMode] = None,
+    image_2: torch.Tensor | None = None,
+    model_mode: KlingVideoGenMode | None = None,
 ) -> tuple[VideoFromFile, str, str]:
     if dual_character:
         request_input_field = KlingDualCharacterEffectInput(
@@ -499,13 +495,13 @@ async def execute_video_effect(
 
 async def execute_lipsync(
     cls: type[IO.ComfyNode],
-    video: VideoInput,
-    audio: Optional[AudioInput] = None,
-    voice_language: Optional[str] = None,
-    model_mode: Optional[str] = None,
-    text: Optional[str] = None,
-    voice_speed: Optional[float] = None,
-    voice_id: Optional[str] = None,
+    video: Input.Video,
+    audio: Input.Audio | None = None,
+    voice_language: str | None = None,
+    model_mode: str | None = None,
+    text: str | None = None,
+    voice_speed: float | None = None,
+    voice_id: str | None = None,
 ) -> IO.NodeOutput:
     if text:
         validate_string(text, field_name="Text", max_length=MAX_PROMPT_LENGTH_LIP_SYNC)
@@ -787,7 +783,7 @@ async def execute(
         negative_prompt: str,
         cfg_scale: float,
         aspect_ratio: str,
-        camera_control: Optional[KlingCameraControl] = None,
+        camera_control: KlingCameraControl | None = None,
     ) -> IO.NodeOutput:
         return await execute_text2video(
             cls,
@@ -854,8 +850,8 @@ async def execute(
         mode: str,
         aspect_ratio: str,
         duration: str,
-        camera_control: Optional[KlingCameraControl] = None,
-        end_frame: Optional[torch.Tensor] = None,
+        camera_control: KlingCameraControl | None = None,
+        end_frame: torch.Tensor | None = None,
     ) -> IO.NodeOutput:
         return await execute_image2video(
             cls,
@@ -965,15 +961,11 @@ def define_schema(cls) -> IO.Schema:
                 IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"),
                 IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"),
                 IO.Float.Input("cfg_scale", default=0.5, min=0.0, max=1.0),
-                IO.Combo.Input(
-                    "aspect_ratio",
-                    options=[i.value for i in KlingVideoGenAspectRatio],
-                    default="16:9",
-                ),
+                IO.Combo.Input("aspect_ratio", options=["16:9", "9:16", "1:1"]),
                 IO.Combo.Input(
                     "mode",
                     options=modes,
-                    default=modes[2],
+                    default=modes[8],
                     tooltip="The configuration to use for the video generation following the format: mode / duration / model_name.",
                 ),
             ],
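
Note on default=modes[2] becoming default=modes[8]: the combo default is positional, so adding mode strings to the underlying mapping changes which entry a given index points to, and the index has to be updated to keep (or move) the intended default. The full modes list is not shown in this diff, so the sketch below only illustrates the mechanism with hypothetical values:

# Hypothetical sketch: an index-based combo default must track changes
# to the options list built from the mode mapping's keys.
mode_map = {
    "pro mode / 5s duration / kling-v2-1": ("pro", "5", "kling-v2-1"),
    "pro mode / 10s duration / kling-v2-1": ("pro", "10", "kling-v2-1"),
    "pro mode / 5s duration / kling-v2-5-turbo": ("pro", "5", "kling-v2-5-turbo"),
    "pro mode / 10s duration / kling-v2-5-turbo": ("pro", "10", "kling-v2-5-turbo"),
}
modes = list(mode_map)  # combo options, in insertion order
print(modes[3])         # pro mode / 10s duration / kling-v2-5-turbo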
@@ -1254,8 +1246,8 @@ def define_schema(cls) -> IO.Schema:
     @classmethod
     async def execute(
         cls,
-        video: VideoInput,
-        audio: AudioInput,
+        video: Input.Video,
+        audio: Input.Audio,
         voice_language: str,
     ) -> IO.NodeOutput:
         return await execute_lipsync(
@@ -1314,7 +1306,7 @@ def define_schema(cls) -> IO.Schema:
     @classmethod
     async def execute(
         cls,
-        video: VideoInput,
+        video: Input.Video,
         text: str,
         voice: str,
         voice_speed: float,
@@ -1471,7 +1463,7 @@ async def execute(
         human_fidelity: float,
         n: int,
         aspect_ratio: KlingImageGenAspectRatio,
-        image: Optional[torch.Tensor] = None,
+        image: torch.Tensor | None = None,
     ) -> IO.NodeOutput:
         validate_string(prompt, field_name="prompt", min_length=1, max_length=MAX_PROMPT_LENGTH_IMAGE_GEN)
         validate_string(negative_prompt, field_name="negative_prompt", max_length=MAX_PROMPT_LENGTH_IMAGE_GEN)
