
Commit 4130be2

Merge branch 'master' into ops-changes

2 parents fc3c52b + d1b9822


82 files changed: +6174 -963 lines changed

README.md

Lines changed: 5 additions & 0 deletions

@@ -119,6 +119,9 @@ ComfyUI follows a weekly release cycle targeting Monday but this regularly changes
 
 1. **[ComfyUI Core](https://github.com/comfyanonymous/ComfyUI)**
    - Releases a new stable version (e.g., v0.7.0) roughly every week.
+   - Starting from v0.4.0 patch versions will be used for fixes backported onto the current stable release.
+   - Minor versions will be used for releases off the master branch.
+   - Patch versions may still be used for releases on the master branch in cases where a backport would not make sense.
    - Commits outside of the stable release tags may be very unstable and break many custom nodes.
    - Serves as the foundation for the desktop release
 

@@ -209,6 +212,8 @@ Python 3.14 works but you may encounter issues with the torch compile node. The
 
 Python 3.13 is very well supported. If you have trouble with some custom node dependencies on 3.13 you can try 3.12
 
+torch 2.4 and above is supported but some features might only work on newer versions. We generally recommend using the latest major version of pytorch unless it is less than 2 weeks old.
+
 ### Instructions:
 
 Git clone this repo.

app/model_manager.py

Lines changed: 2 additions & 2 deletions

@@ -44,7 +44,7 @@ async def get_model_folders(request):
         @routes.get("/experiment/models/{folder}")
         async def get_all_models(request):
             folder = request.match_info.get("folder", None)
-            if not folder in folder_paths.folder_names_and_paths:
+            if folder not in folder_paths.folder_names_and_paths:
                 return web.Response(status=404)
             files = self.get_model_file_list(folder)
             return web.json_response(files)

@@ -55,7 +55,7 @@ async def get_model_preview(request):
            path_index = int(request.match_info.get("path_index", None))
            filename = request.match_info.get("filename", None)
 
-           if not folder_name in folder_paths.folder_names_and_paths:
+           if folder_name not in folder_paths.folder_names_and_paths:
                return web.Response(status=404)
 
            folders = folder_paths.folder_names_and_paths[folder_name]

comfy/clip_model.py

Lines changed: 19 additions & 0 deletions

@@ -2,6 +2,25 @@
 from comfy.ldm.modules.attention import optimized_attention_for_device
 import comfy.ops
 
+def clip_preprocess(image, size=224, mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711], crop=True):
+    image = image[:, :, :, :3] if image.shape[3] > 3 else image
+    mean = torch.tensor(mean, device=image.device, dtype=image.dtype)
+    std = torch.tensor(std, device=image.device, dtype=image.dtype)
+    image = image.movedim(-1, 1)
+    if not (image.shape[2] == size and image.shape[3] == size):
+        if crop:
+            scale = (size / min(image.shape[2], image.shape[3]))
+            scale_size = (round(scale * image.shape[2]), round(scale * image.shape[3]))
+        else:
+            scale_size = (size, size)
+
+        image = torch.nn.functional.interpolate(image, size=scale_size, mode="bicubic", antialias=True)
+        h = (image.shape[2] - size)//2
+        w = (image.shape[3] - size)//2
+        image = image[:,:,h:h+size,w:w+size]
+    image = torch.clip((255. * image), 0, 255).round() / 255.0
+    return (image - mean.view([3,1,1])) / std.view([3,1,1])
+
 class CLIPAttention(torch.nn.Module):
     def __init__(self, embed_dim, heads, dtype, device, operations):
         super().__init__()
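
For context, a minimal sketch of how this relocated helper behaves end to end (the batch shape below is an illustrative assumption, not taken from the commit — ComfyUI IMAGE tensors are typically [B, H, W, C] with values in [0, 1]):

    import torch
    import comfy.clip_model

    # Hypothetical batch: two 512x768 RGB images, channels-last, values in [0, 1].
    batch = torch.rand(2, 512, 768, 3)

    # Scales the short side to 224, center-crops to 224x224, snaps values to
    # 8-bit levels, then normalizes with the CLIP mean/std.
    pixels = comfy.clip_model.clip_preprocess(batch, size=224, crop=True)
    print(pixels.shape)  # torch.Size([2, 3, 224, 224])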

comfy/clip_vision.py

Lines changed: 2 additions & 20 deletions

@@ -1,6 +1,5 @@
 from .utils import load_torch_file, transformers_convert, state_dict_prefix_replace
 import os
-import torch
 import json
 import logging
 

@@ -17,24 +16,7 @@ def __getitem__(self, key):
     def __setitem__(self, key, item):
         setattr(self, key, item)
 
-def clip_preprocess(image, size=224, mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711], crop=True):
-    image = image[:, :, :, :3] if image.shape[3] > 3 else image
-    mean = torch.tensor(mean, device=image.device, dtype=image.dtype)
-    std = torch.tensor(std, device=image.device, dtype=image.dtype)
-    image = image.movedim(-1, 1)
-    if not (image.shape[2] == size and image.shape[3] == size):
-        if crop:
-            scale = (size / min(image.shape[2], image.shape[3]))
-            scale_size = (round(scale * image.shape[2]), round(scale * image.shape[3]))
-        else:
-            scale_size = (size, size)
-
-        image = torch.nn.functional.interpolate(image, size=scale_size, mode="bicubic", antialias=True)
-        h = (image.shape[2] - size)//2
-        w = (image.shape[3] - size)//2
-        image = image[:,:,h:h+size,w:w+size]
-    image = torch.clip((255. * image), 0, 255).round() / 255.0
-    return (image - mean.view([3,1,1])) / std.view([3,1,1])
+clip_preprocess = comfy.clip_model.clip_preprocess  # Prevent some stuff from breaking, TODO: remove eventually
 
 IMAGE_ENCODERS = {
     "clip_vision_model": comfy.clip_model.CLIPVisionModelProjection,

@@ -73,7 +55,7 @@ def get_sd(self):
 
     def encode_image(self, image, crop=True):
        comfy.model_management.load_model_gpu(self.patcher)
-       pixel_values = clip_preprocess(image.to(self.load_device), size=self.image_size, mean=self.image_mean, std=self.image_std, crop=crop).float()
+       pixel_values = comfy.clip_model.clip_preprocess(image.to(self.load_device), size=self.image_size, mean=self.image_mean, std=self.image_std, crop=crop).float()
        out = self.model(pixel_values=pixel_values, intermediate_output='all' if self.return_all_hidden_states else -2)
 
        outputs = Output()
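
A quick note on what the compatibility alias preserves (the first import below is an assumption about how existing custom nodes consume this function, not code from the commit):

    # Custom-node code written against the old location keeps working:
    from comfy.clip_vision import clip_preprocess  # now resolves to comfy.clip_model.clip_preprocess

    # New code should import from the function's new home directly:
    from comfy.clip_model import clip_preprocess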

comfy/context_windows.py

Lines changed: 7 additions & 1 deletion

@@ -143,7 +143,7 @@ def get_resized_cond(self, cond_in: list[dict], x_in: torch.Tensor, window: Inde
         # if multiple conds, split based on primary region
         if self.split_conds_to_windows and len(cond_in) > 1:
             region = window.get_region_index(len(cond_in))
-            logging.info(f"Splitting conds to windows; using region {region} for window {window[0]}-{window[-1]} with center ratio {window.center_ratio:.3f}")
+            logging.info(f"Splitting conds to windows; using region {region} for window {window.index_list[0]}-{window.index_list[-1]} with center ratio {window.center_ratio:.3f}")
             cond_in = [cond_in[region]]
         # cond object is a list containing a dict - outer list is irrelevant, so just loop through it
         for actual_cond in cond_in:

@@ -188,6 +188,12 @@ def get_resized_cond(self, cond_in: list[dict], x_in: torch.Tensor, window: Inde
                                 audio_cond = cond_value.cond
                                 if audio_cond.ndim > 1 and audio_cond.size(1) == x_in.size(self.dim):
                                     new_cond_item[cond_key] = cond_value._copy_with(window.get_tensor(audio_cond, device, dim=1))
+                            # Handle vace_context (temporal dim is 3)
+                            elif cond_key == "vace_context" and hasattr(cond_value, "cond") and isinstance(cond_value.cond, torch.Tensor):
+                                vace_cond = cond_value.cond
+                                if vace_cond.ndim >= 4 and vace_cond.size(3) == x_in.size(self.dim):
+                                    sliced_vace = window.get_tensor(vace_cond, device, dim=3, retain_index_list=self.cond_retain_index_list)
+                                    new_cond_item[cond_key] = cond_value._copy_with(sliced_vace)
                             # if has cond that is a Tensor, check if needs to be subset
                             elif hasattr(cond_value, "cond") and isinstance(cond_value.cond, torch.Tensor):
                                 if (self.dim < cond_value.cond.ndim and cond_value.cond.size(self.dim) == x_in.size(self.dim)) or \
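
To illustrate the slicing the new branch performs, a standalone sketch — `window.get_tensor` is internal to ComfyUI, so this approximates it with plain `Tensor.index_select`, and the tensor shape and index list are hypothetical:

    import torch

    # Hypothetical vace_context-like tensor whose temporal axis is dim 3,
    # matching the latent's window dimension (here: 32 frames).
    vace = torch.randn(1, 2, 16, 32, 64)
    index_list = [4, 5, 6, 7]  # frames covered by the current window

    idx = torch.tensor(index_list, device=vace.device)
    sliced = vace.index_select(3, idx)  # analogous to window.get_tensor(vace, device, dim=3)
    print(sliced.shape)  # torch.Size([1, 2, 16, 4, 64])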

comfy/hooks.py

Lines changed: 2 additions & 1 deletion

@@ -527,7 +527,8 @@ def prepare_current_keyframe(self, curr_t: float, transformer_options: dict[str,
                 if self._current_keyframe.get_effective_guarantee_steps(max_sigma) > 0:
                     break
             # if eval_c is outside the percent range, stop looking further
-            else: break
+            else:
+                break
         # update steps current context is used
         self._current_used_steps += 1
         # update current timestep this was performed on

comfy/k_diffusion/sampling.py

Lines changed: 3 additions & 0 deletions

@@ -74,6 +74,9 @@ def get_ancestral_step(sigma_from, sigma_to, eta=1.):
 
 def default_noise_sampler(x, seed=None):
     if seed is not None:
+        if x.device == torch.device("cpu"):
+            seed += 1
+
         generator = torch.Generator(device=x.device)
         generator.manual_seed(seed)
     else:
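
The observable effect of the CPU-only seed bump, shown in a standalone sketch of torch.Generator determinism (the diff does not state the motivation, so the comments stick to what the code does):

    import torch

    x = torch.zeros(4, device="cpu")

    def noise_for(seed):
        g = torch.Generator(device=x.device)
        g.manual_seed(seed)
        return torch.randn(x.size(), generator=g, device=x.device)

    # Same seed, same device -> identical noise (sampling stays reproducible).
    assert torch.equal(noise_for(43), noise_for(43))
    # Different seeds -> different streams, so the CPU-only `seed += 1`
    # moves CPU noise onto a neighboring, still-deterministic stream.
    assert not torch.equal(noise_for(42), noise_for(43))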

comfy/latent_formats.py

Lines changed: 3 additions & 0 deletions

@@ -407,6 +407,9 @@ def __init__(self):
 
         self.latent_rgb_factors_bias = [-0.0571, -0.1657, -0.2512]
 
+class LTXAV(LTXV):
+    pass
+
 class HunyuanVideo(LatentFormat):
     latent_channels = 16
     latent_dimensions = 3

comfy/ldm/chroma_radiance/model.py

Lines changed: 1 addition & 1 deletion

@@ -270,7 +270,7 @@ def radiance_get_override_params(self, overrides: dict) -> ChromaRadianceParams:
         bad_keys = tuple(
             k
             for k, v in overrides.items()
-            if type(v) != type(getattr(params, k)) and (v is not None or k not in nullable_keys)
+            if not isinstance(v, type(getattr(params, k))) and (v is not None or k not in nullable_keys)
         )
         if bad_keys:
             e = f"Invalid value(s) in transformer_options chroma_radiance_options: {', '.join(bad_keys)}"
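
For context, the two checks differ on subclass instances, so this is a behavior change, not just style: `type(v) != type(expected)` rejects subclasses that `isinstance` accepts. A standalone illustration (not repo code) using bool, which subclasses int:

    expected = 20          # an int-typed parameter value
    override = True        # a bool override supplied by the caller

    print(type(override) != type(expected))          # True  -> old check flags this as bad
    print(not isinstance(override, type(expected)))  # False -> new check accepts it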

comfy/ldm/hunyuan_video/upsampler.py

Lines changed: 2 additions & 1 deletion

@@ -3,7 +3,8 @@
 import torch.nn.functional as F
 from comfy.ldm.modules.diffusionmodules.model import ResnetBlock, VideoConv3d
 from comfy.ldm.hunyuan_video.vae_refiner import RMS_norm
-import model_management, model_patcher
+import model_management
+import model_patcher
 
 class SRResidualCausalBlock3D(nn.Module):
     def __init__(self, channels: int):
