Commit cb7d245

Merge branch 'master' into v3-improvements
2 parents: 6630cd8 + 5d9ad0c

File tree: 19 files changed, +373 −198 lines


.github/workflows/test-ci.yml

Lines changed: 1 addition & 0 deletions

@@ -5,6 +5,7 @@ on:
   push:
     branches:
       - master
+      - release/**
     paths-ignore:
       - 'app/**'
       - 'input/**'

.github/workflows/test-execution.yml

Lines changed: 2 additions & 2 deletions

@@ -2,9 +2,9 @@ name: Execution Tests
 
 on:
   push:
-    branches: [ main, master ]
+    branches: [ main, master, release/** ]
   pull_request:
-    branches: [ main, master ]
+    branches: [ main, master, release/** ]
 
 jobs:
   test:

.github/workflows/test-launch.yml

Lines changed: 2 additions & 2 deletions

@@ -2,9 +2,9 @@ name: Test server launches without errors
 
 on:
   push:
-    branches: [ main, master ]
+    branches: [ main, master, release/** ]
  pull_request:
-    branches: [ main, master ]
+    branches: [ main, master, release/** ]
 
 jobs:
   test:

.github/workflows/test-unit.yml

Lines changed: 2 additions & 2 deletions

@@ -2,9 +2,9 @@ name: Unit Tests
 
 on:
   push:
-    branches: [ main, master ]
+    branches: [ main, master, release/** ]
   pull_request:
-    branches: [ main, master ]
+    branches: [ main, master, release/** ]
 
 jobs:
   test:

.github/workflows/update-version.yml

Lines changed: 1 addition & 0 deletions

@@ -6,6 +6,7 @@ on:
       - "pyproject.toml"
     branches:
       - master
+      - release/**
 
 jobs:
   update-version:

comfy/k_diffusion/sampling.py

Lines changed: 13 additions & 2 deletions

@@ -1618,6 +1618,17 @@ def sample_seeds_2(model, x, sigmas, extra_args=None, callback=None, disable=Non
         x = x + sde_noise * sigmas[i + 1] * s_noise
     return x
 
+@torch.no_grad()
+def sample_exp_heun_2_x0(model, x, sigmas, extra_args=None, callback=None, disable=None, solver_type="phi_2"):
+    """Deterministic exponential Heun second order method in data prediction (x0) and logSNR time."""
+    return sample_seeds_2(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, eta=0.0, s_noise=0.0, noise_sampler=None, r=1.0, solver_type=solver_type)
+
+
+@torch.no_grad()
+def sample_exp_heun_2_x0_sde(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, solver_type="phi_2"):
+    """Stochastic exponential Heun second order method in data prediction (x0) and logSNR time."""
+    return sample_seeds_2(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, eta=eta, s_noise=s_noise, noise_sampler=noise_sampler, r=1.0, solver_type=solver_type)
+
 
 @torch.no_grad()
 def sample_seeds_3(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, r_1=1./3, r_2=2./3):

@@ -1765,7 +1776,7 @@ def sample_sa_solver(model, x, sigmas, extra_args=None, callback=None, disable=F
         # Predictor
         if sigmas[i + 1] == 0:
             # Denoising step
-            x = denoised
+            x_pred = denoised
         else:
             tau_t = tau_func(sigmas[i + 1])
             curr_lambdas = lambdas[i - predictor_order_used + 1:i + 1]

@@ -1786,7 +1797,7 @@ def sample_sa_solver(model, x, sigmas, extra_args=None, callback=None, disable=F
             if tau_t > 0 and s_noise > 0:
                 noise = noise_sampler(sigmas[i], sigmas[i + 1]) * sigmas[i + 1] * (-2 * tau_t ** 2 * h).expm1().neg().sqrt() * s_noise
                 x_pred = x_pred + noise
-    return x
+    return x_pred
 
 
 @torch.no_grad()
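The two new samplers are thin wrappers around the existing sample_seeds_2 solver with r=1.0: the _x0 variant forces eta and s_noise to 0 for a deterministic trajectory, while the _sde variant keeps the stochastic defaults. A minimal usage sketch, assuming it runs inside a ComfyUI checkout; the toy denoiser and the sigma schedule below are placeholders, not part of the commit:

import torch
from comfy.k_diffusion import sampling as k_sampling

# Toy k-diffusion-style denoiser: model(x, sigma, **extra_args) -> denoised prediction.
# Here it simply predicts zeros; a real model wrap would return the denoised latent.
def toy_denoiser(x, sigma, **extra_args):
    return torch.zeros_like(x)

x = torch.randn(1, 4, 8, 8)                        # starting noise
sigmas = torch.tensor([14.6, 7.0, 3.0, 1.0, 0.0])  # placeholder schedule ending at 0

# Deterministic variant: eta and s_noise are fixed to 0 inside the wrapper.
out = k_sampling.sample_exp_heun_2_x0(toy_denoiser, x, sigmas, disable=True)

# Stochastic variant keeps eta=1.0 and s_noise=1.0 by default.
out_sde = k_sampling.sample_exp_heun_2_x0_sde(toy_denoiser, x, sigmas, disable=True)

The sa_solver hunks are a separate fix: the predictor result is now consistently carried in x_pred, so the denoising branch and the final return no longer reference the stale x.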

comfy/ldm/qwen_image/model.py

Lines changed: 6 additions & 1 deletion

@@ -322,6 +322,7 @@ def __init__(
         pooled_projection_dim: int = 768,
         guidance_embeds: bool = False,
         axes_dims_rope: Tuple[int, int, int] = (16, 56, 56),
+        default_ref_method="index",
         image_model=None,
         final_layer=True,
         dtype=None,

@@ -334,6 +335,7 @@ def __init__(
         self.in_channels = in_channels
         self.out_channels = out_channels or in_channels
         self.inner_dim = num_attention_heads * attention_head_dim
+        self.default_ref_method = default_ref_method
 
         self.pe_embedder = EmbedND(dim=attention_head_dim, theta=10000, axes_dim=list(axes_dims_rope))
 

@@ -361,6 +363,9 @@ def __init__(
             for _ in range(num_layers)
         ])
 
+        if self.default_ref_method == "index_timestep_zero":
+            self.register_buffer("__index_timestep_zero__", torch.tensor([]))
+
         if final_layer:
             self.norm_out = LastLayer(self.inner_dim, self.inner_dim, dtype=dtype, device=device, operations=operations)
             self.proj_out = operations.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True, dtype=dtype, device=device)

@@ -416,7 +421,7 @@ def _forward(
             h = 0
             w = 0
             index = 0
-            ref_method = kwargs.get("ref_latents_method", "index")
+            ref_method = kwargs.get("ref_latents_method", self.default_ref_method)
             index_ref_method = (ref_method == "index") or (ref_method == "index_timestep_zero")
             timestep_zero = ref_method == "index_timestep_zero"
             for ref in ref_latents:
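The empty __index_timestep_zero__ buffer acts as a sentinel: it carries no weights, but registered buffers are serialized into the state dict, so checkpoints saved from a model built with default_ref_method="index_timestep_zero" can be recognized later purely from their keys. A minimal sketch of that mechanism (the module and key names below are illustrative, not the commit's code):

import torch
import torch.nn as nn

class TinyModel(nn.Module):
    def __init__(self, use_sentinel: bool):
        super().__init__()
        self.proj = nn.Linear(4, 4)
        if use_sentinel:
            # An empty buffer adds a key to state_dict() without adding parameters.
            self.register_buffer("__my_sentinel__", torch.tensor([]))

print("__my_sentinel__" in TinyModel(True).state_dict())   # True
print("__my_sentinel__" in TinyModel(False).state_dict())  # False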

comfy/model_detection.py

Lines changed: 3 additions & 1 deletion

@@ -259,7 +259,7 @@ def detect_unet_config(state_dict, key_prefix, metadata=None):
         dit_config["nerf_tile_size"] = 512
         dit_config["nerf_final_head_type"] = "conv" if f"{key_prefix}nerf_final_layer_conv.norm.scale" in state_dict_keys else "linear"
         dit_config["nerf_embedder_dtype"] = torch.float32
-        if "__x0__" in state_dict_keys: # x0 pred
+        if "{}__x0__".format(key_prefix) in state_dict_keys: # x0 pred
             dit_config["use_x0"] = True
         else:
             dit_config["use_x0"] = False

@@ -618,6 +618,8 @@ def detect_unet_config(state_dict, key_prefix, metadata=None):
         dit_config["image_model"] = "qwen_image"
         dit_config["in_channels"] = state_dict['{}img_in.weight'.format(key_prefix)].shape[1]
         dit_config["num_layers"] = count_blocks(state_dict_keys, '{}transformer_blocks.'.format(key_prefix) + '{}.')
+        if "{}__index_timestep_zero__".format(key_prefix) in state_dict_keys: # 2511
+            dit_config["default_ref_method"] = "index_timestep_zero"
         return dit_config
 
     if '{}visual_transformer_blocks.0.cross_attention.key_norm.weight'.format(key_prefix) in state_dict_keys: # Kandinsky 5
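Both hunks follow the same pattern: sentinel keys only appear in the state dict with the loader's key prefix attached, which is why the bare "__x0__" check in the first hunk was a bug. A minimal sketch of the prefixed-key check, with illustrative keys and prefix:

# Illustrative state-dict keys; the prefix depends on how the checkpoint was saved.
key_prefix = "model.diffusion_model."
state_dict_keys = {
    "model.diffusion_model.img_in.weight",
    "model.diffusion_model.__index_timestep_zero__",
}

dit_config = {}
# A bare "__index_timestep_zero__" membership test would miss the prefixed key.
if "{}__index_timestep_zero__".format(key_prefix) in state_dict_keys:
    dit_config["default_ref_method"] = "index_timestep_zero"

print(dit_config)  # {'default_ref_method': 'index_timestep_zero'}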

comfy/samplers.py

Lines changed: 1 addition & 1 deletion

@@ -720,7 +720,7 @@ def max_denoise(self, model_wrap, sigmas):
         sigma = float(sigmas[0])
         return math.isclose(max_sigma, sigma, rel_tol=1e-05) or sigma > max_sigma
 
-KSAMPLER_NAMES = ["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2","dpm_2", "dpm_2_ancestral",
+KSAMPLER_NAMES = ["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "exp_heun_2_x0", "exp_heun_2_x0_sde", "dpm_2", "dpm_2_ancestral",
                   "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu",
                   "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_2m_sde_heun", "dpmpp_2m_sde_heun_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm",
                   "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp",

comfy_api/latest/_io.py

Lines changed: 2 additions & 2 deletions

@@ -1594,12 +1594,12 @@ async def EXECUTE_NORMALIZED_ASYNC(cls, *args, **kwargs) -> NodeOutput:
 
     @final
     @classmethod
-    def PREPARE_CLASS_CLONE(cls, v3_data: V3Data) -> type[ComfyNode]:
+    def PREPARE_CLASS_CLONE(cls, v3_data: V3Data | None) -> type[ComfyNode]:
         """Creates clone of real node class to prevent monkey-patching."""
         c_type: type[ComfyNode] = cls if is_class(cls) else type(cls)
         type_clone: type[ComfyNode] = shallow_clone_class(c_type)
         # set hidden
-        type_clone.hidden = HiddenHolder.from_dict(v3_data["hidden_inputs"])
+        type_clone.hidden = HiddenHolder.from_dict(v3_data["hidden_inputs"] if v3_data else None)
         return type_clone
 
     @final
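Widening the annotation to V3Data | None documents what the body now tolerates; the behavioural change is the guard on the subscript, since indexing None raises a TypeError. A small illustration of the pattern with a plain dict (not the actual V3Data or HiddenHolder types):

from typing import Optional

def hidden_inputs(v3_data: Optional[dict]):
    # v3_data["hidden_inputs"] would raise TypeError when v3_data is None;
    # the guarded form falls back to None instead.
    return v3_data["hidden_inputs"] if v3_data else None

print(hidden_inputs({"hidden_inputs": {"unique_id": "42"}}))  # {'unique_id': '42'}
print(hidden_inputs(None))                                    # None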
