Skip to content

Commit ae65e18

Browse files
committed
chore: replace imports of deprecated V1 classes
1 parent 35fa091 commit ae65e18

File tree

7 files changed

+55
-70
lines changed

7 files changed

+55
-70
lines changed

comfy_api_nodes/apis/veo_api.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -85,7 +85,7 @@ class Response1(BaseModel):
8585
raiMediaFilteredReasons: Optional[list[str]] = Field(
8686
None, description='Reasons why media was filtered by responsible AI policies'
8787
)
88-
videos: Optional[list[Video]] = None
88+
videos: Optional[list[Video]] = Field(None)
8989

9090

9191
class VeoGenVidPollResponse(BaseModel):

comfy_api_nodes/nodes_gemini.py

Lines changed: 10 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -13,8 +13,7 @@
1313
from typing_extensions import override
1414

1515
import folder_paths
16-
from comfy_api.latest import IO, ComfyExtension, Input
17-
from comfy_api.util import VideoCodec, VideoContainer
16+
from comfy_api.latest import IO, ComfyExtension, Input, Types
1817
from comfy_api_nodes.apis.gemini_api import (
1918
GeminiContent,
2019
GeminiFileData,
@@ -68,7 +67,7 @@ class GeminiImageModel(str, Enum):
6867

6968
async def create_image_parts(
7069
cls: type[IO.ComfyNode],
71-
images: torch.Tensor,
70+
images: Input.Image,
7271
image_limit: int = 0,
7372
) -> list[GeminiPart]:
7473
image_parts: list[GeminiPart] = []
@@ -154,8 +153,8 @@ def get_text_from_response(response: GeminiGenerateContentResponse) -> str:
154153
return "\n".join([part.text for part in parts])
155154

156155

157-
def get_image_from_response(response: GeminiGenerateContentResponse) -> torch.Tensor:
158-
image_tensors: list[torch.Tensor] = []
156+
def get_image_from_response(response: GeminiGenerateContentResponse) -> Input.Image:
157+
image_tensors: list[Input.Image] = []
159158
parts = get_parts_by_type(response, "image/png")
160159
for part in parts:
161160
image_data = base64.b64decode(part.inlineData.data)
@@ -293,7 +292,9 @@ def define_schema(cls):
293292
def create_video_parts(cls, video_input: Input.Video) -> list[GeminiPart]:
294293
"""Convert video input to Gemini API compatible parts."""
295294

296-
base_64_string = video_to_base64_string(video_input, container_format=VideoContainer.MP4, codec=VideoCodec.H264)
295+
base_64_string = video_to_base64_string(
296+
video_input, container_format=Types.VideoContainer.MP4, codec=Types.VideoCodec.H264
297+
)
297298
return [
298299
GeminiPart(
299300
inlineData=GeminiInlineData(
@@ -343,7 +344,7 @@ async def execute(
343344
prompt: str,
344345
model: str,
345346
seed: int,
346-
images: torch.Tensor | None = None,
347+
images: Input.Image | None = None,
347348
audio: Input.Audio | None = None,
348349
video: Input.Video | None = None,
349350
files: list[GeminiPart] | None = None,
@@ -542,7 +543,7 @@ async def execute(
542543
prompt: str,
543544
model: str,
544545
seed: int,
545-
images: torch.Tensor | None = None,
546+
images: Input.Image | None = None,
546547
files: list[GeminiPart] | None = None,
547548
aspect_ratio: str = "auto",
548549
response_modalities: str = "IMAGE+TEXT",
@@ -662,7 +663,7 @@ async def execute(
662663
aspect_ratio: str,
663664
resolution: str,
664665
response_modalities: str,
665-
images: torch.Tensor | None = None,
666+
images: Input.Image | None = None,
666667
files: list[GeminiPart] | None = None,
667668
) -> IO.NodeOutput:
668669
validate_string(prompt, strip_whitespace=True, min_length=1)

comfy_api_nodes/nodes_ltxv.py

Lines changed: 7 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,9 @@
11
from io import BytesIO
2-
from typing import Optional
32

4-
import torch
53
from pydantic import BaseModel, Field
64
from typing_extensions import override
75

8-
from comfy_api.input_impl import VideoFromFile
9-
from comfy_api.latest import IO, ComfyExtension
6+
from comfy_api.latest import IO, ComfyExtension, Input, InputImpl
107
from comfy_api_nodes.util import (
118
ApiEndpoint,
129
get_number_of_images,
@@ -26,9 +23,9 @@ class ExecuteTaskRequest(BaseModel):
2623
model: str = Field(...)
2724
duration: int = Field(...)
2825
resolution: str = Field(...)
29-
fps: Optional[int] = Field(25)
30-
generate_audio: Optional[bool] = Field(True)
31-
image_uri: Optional[str] = Field(None)
26+
fps: int | None = Field(25)
27+
generate_audio: bool | None = Field(True)
28+
image_uri: str | None = Field(None)
3229

3330

3431
class TextToVideoNode(IO.ComfyNode):
@@ -103,7 +100,7 @@ async def execute(
103100
as_binary=True,
104101
max_retries=1,
105102
)
106-
return IO.NodeOutput(VideoFromFile(BytesIO(response)))
103+
return IO.NodeOutput(InputImpl.VideoFromFile(BytesIO(response)))
107104

108105

109106
class ImageToVideoNode(IO.ComfyNode):
@@ -153,7 +150,7 @@ def define_schema(cls):
153150
@classmethod
154151
async def execute(
155152
cls,
156-
image: torch.Tensor,
153+
image: Input.Image,
157154
model: str,
158155
prompt: str,
159156
duration: int,
@@ -183,7 +180,7 @@ async def execute(
183180
as_binary=True,
184181
max_retries=1,
185182
)
186-
return IO.NodeOutput(VideoFromFile(BytesIO(response)))
183+
return IO.NodeOutput(InputImpl.VideoFromFile(BytesIO(response)))
187184

188185

189186
class LtxvApiExtension(ComfyExtension):

comfy_api_nodes/nodes_moonvalley.py

Lines changed: 8 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,8 @@
11
import logging
2-
from typing import Optional
32

4-
import torch
53
from typing_extensions import override
64

7-
from comfy_api.input import VideoInput
8-
from comfy_api.latest import IO, ComfyExtension
5+
from comfy_api.latest import IO, ComfyExtension, Input
96
from comfy_api_nodes.apis import (
107
MoonvalleyPromptResponse,
118
MoonvalleyTextToVideoInferenceParams,
@@ -61,7 +58,7 @@ def validate_task_creation_response(response) -> None:
6158
raise RuntimeError(error_msg)
6259

6360

64-
def validate_video_to_video_input(video: VideoInput) -> VideoInput:
61+
def validate_video_to_video_input(video: Input.Video) -> Input.Video:
6562
"""
6663
Validates and processes video input for Moonvalley Video-to-Video generation.
6764
@@ -82,7 +79,7 @@ def validate_video_to_video_input(video: VideoInput) -> VideoInput:
8279
return _validate_and_trim_duration(video)
8380

8481

85-
def _get_video_dimensions(video: VideoInput) -> tuple[int, int]:
82+
def _get_video_dimensions(video: Input.Video) -> tuple[int, int]:
8683
"""Extracts video dimensions with error handling."""
8784
try:
8885
return video.get_dimensions()
@@ -106,7 +103,7 @@ def _validate_video_dimensions(width: int, height: int) -> None:
106103
raise ValueError(f"Resolution {width}x{height} not supported. Supported: {supported_list}")
107104

108105

109-
def _validate_and_trim_duration(video: VideoInput) -> VideoInput:
106+
def _validate_and_trim_duration(video: Input.Video) -> Input.Video:
110107
"""Validates video duration and trims to 5 seconds if needed."""
111108
duration = video.get_duration()
112109
_validate_minimum_duration(duration)
@@ -119,7 +116,7 @@ def _validate_minimum_duration(duration: float) -> None:
119116
raise ValueError("Input video must be at least 5 seconds long.")
120117

121118

122-
def _trim_if_too_long(video: VideoInput, duration: float) -> VideoInput:
119+
def _trim_if_too_long(video: Input.Video, duration: float) -> Input.Video:
123120
"""Trims video to 5 seconds if longer."""
124121
if duration > 5:
125122
return trim_video(video, 5)
@@ -241,7 +238,7 @@ def define_schema(cls) -> IO.Schema:
241238
@classmethod
242239
async def execute(
243240
cls,
244-
image: torch.Tensor,
241+
image: Input.Image,
245242
prompt: str,
246243
negative_prompt: str,
247244
resolution: str,
@@ -362,9 +359,9 @@ async def execute(
362359
prompt: str,
363360
negative_prompt: str,
364361
seed: int,
365-
video: Optional[VideoInput] = None,
362+
video: Input.Video | None = None,
366363
control_type: str = "Motion Transfer",
367-
motion_intensity: Optional[int] = 100,
364+
motion_intensity: int | None = 100,
368365
steps=33,
369366
prompt_adherence=4.5,
370367
) -> IO.NodeOutput:

comfy_api_nodes/nodes_runway.py

Lines changed: 13 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -11,12 +11,11 @@
1111
1212
"""
1313

14-
from typing import Union, Optional
15-
from typing_extensions import override
1614
from enum import Enum
1715

18-
import torch
16+
from typing_extensions import override
1917

18+
from comfy_api.latest import IO, ComfyExtension, Input, InputImpl
2019
from comfy_api_nodes.apis import (
2120
RunwayImageToVideoRequest,
2221
RunwayImageToVideoResponse,
@@ -44,8 +43,6 @@
4443
sync_op,
4544
poll_op,
4645
)
47-
from comfy_api.input_impl import VideoFromFile
48-
from comfy_api.latest import ComfyExtension, IO
4946

5047
PATH_IMAGE_TO_VIDEO = "/proxy/runway/image_to_video"
5148
PATH_TEXT_TO_IMAGE = "/proxy/runway/text_to_image"
@@ -80,7 +77,7 @@ class RunwayGen3aAspectRatio(str, Enum):
8077
field_1280_768 = "1280:768"
8178

8279

83-
def get_video_url_from_task_status(response: TaskStatusResponse) -> Union[str, None]:
80+
def get_video_url_from_task_status(response: TaskStatusResponse) -> str | None:
8481
"""Returns the video URL from the task status response if it exists."""
8582
if hasattr(response, "output") and len(response.output) > 0:
8683
return response.output[0]
@@ -89,21 +86,21 @@ def get_video_url_from_task_status(response: TaskStatusResponse) -> Union[str, N
8986

9087
def extract_progress_from_task_status(
9188
response: TaskStatusResponse,
92-
) -> Union[float, None]:
89+
) -> float | None:
9390
if hasattr(response, "progress") and response.progress is not None:
9491
return response.progress * 100
9592
return None
9693

9794

98-
def get_image_url_from_task_status(response: TaskStatusResponse) -> Union[str, None]:
95+
def get_image_url_from_task_status(response: TaskStatusResponse) -> str | None:
9996
"""Returns the image URL from the task status response if it exists."""
10097
if hasattr(response, "output") and len(response.output) > 0:
10198
return response.output[0]
10299
return None
103100

104101

105102
async def get_response(
106-
cls: type[IO.ComfyNode], task_id: str, estimated_duration: Optional[int] = None
103+
cls: type[IO.ComfyNode], task_id: str, estimated_duration: int | None = None
107104
) -> TaskStatusResponse:
108105
"""Poll the task status until it is finished then get the response."""
109106
return await poll_op(
@@ -119,8 +116,8 @@ async def get_response(
119116
async def generate_video(
120117
cls: type[IO.ComfyNode],
121118
request: RunwayImageToVideoRequest,
122-
estimated_duration: Optional[int] = None,
123-
) -> VideoFromFile:
119+
estimated_duration: int | None = None,
120+
) -> InputImpl.VideoFromFile:
124121
initial_response = await sync_op(
125122
cls,
126123
endpoint=ApiEndpoint(path=PATH_IMAGE_TO_VIDEO, method="POST"),
@@ -193,7 +190,7 @@ def define_schema(cls):
193190
async def execute(
194191
cls,
195192
prompt: str,
196-
start_frame: torch.Tensor,
193+
start_frame: Input.Image,
197194
duration: str,
198195
ratio: str,
199196
seed: int,
@@ -283,7 +280,7 @@ def define_schema(cls):
283280
async def execute(
284281
cls,
285282
prompt: str,
286-
start_frame: torch.Tensor,
283+
start_frame: Input.Image,
287284
duration: str,
288285
ratio: str,
289286
seed: int,
@@ -381,8 +378,8 @@ def define_schema(cls):
381378
async def execute(
382379
cls,
383380
prompt: str,
384-
start_frame: torch.Tensor,
385-
end_frame: torch.Tensor,
381+
start_frame: Input.Image,
382+
end_frame: Input.Image,
386383
duration: str,
387384
ratio: str,
388385
seed: int,
@@ -467,7 +464,7 @@ async def execute(
467464
cls,
468465
prompt: str,
469466
ratio: str,
470-
reference_image: Optional[torch.Tensor] = None,
467+
reference_image: Input.Image | None = None,
471468
) -> IO.NodeOutput:
472469
validate_string(prompt, min_length=1)
473470

comfy_api_nodes/nodes_veo2.py

Lines changed: 5 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,9 @@
11
import base64
22
from io import BytesIO
33

4-
import torch
54
from typing_extensions import override
65

7-
from comfy_api.input_impl.video_types import VideoFromFile
8-
from comfy_api.latest import IO, ComfyExtension
6+
from comfy_api.latest import IO, ComfyExtension, Input, InputImpl
97
from comfy_api_nodes.apis.veo_api import (
108
VeoGenVidPollRequest,
119
VeoGenVidPollResponse,
@@ -232,7 +230,7 @@ def status_extractor(response):
232230

233231
# Check if video is provided as base64 or URL
234232
if hasattr(video, "bytesBase64Encoded") and video.bytesBase64Encoded:
235-
return IO.NodeOutput(VideoFromFile(BytesIO(base64.b64decode(video.bytesBase64Encoded))))
233+
return IO.NodeOutput(InputImpl.VideoFromFile(BytesIO(base64.b64decode(video.bytesBase64Encoded))))
236234

237235
if hasattr(video, "gcsUri") and video.gcsUri:
238236
return IO.NodeOutput(await download_url_to_video_output(video.gcsUri))
@@ -431,8 +429,8 @@ async def execute(
431429
aspect_ratio: str,
432430
duration: int,
433431
seed: int,
434-
first_frame: torch.Tensor,
435-
last_frame: torch.Tensor,
432+
first_frame: Input.Image,
433+
last_frame: Input.Image,
436434
model: str,
437435
generate_audio: bool,
438436
):
@@ -493,7 +491,7 @@ async def execute(
493491
if response.videos:
494492
video = response.videos[0]
495493
if video.bytesBase64Encoded:
496-
return IO.NodeOutput(VideoFromFile(BytesIO(base64.b64decode(video.bytesBase64Encoded))))
494+
return IO.NodeOutput(InputImpl.VideoFromFile(BytesIO(base64.b64decode(video.bytesBase64Encoded))))
497495
if video.gcsUri:
498496
return IO.NodeOutput(await download_url_to_video_output(video.gcsUri))
499497
raise Exception("Video returned but no data or URL was provided")

0 commit comments

Comments (0)