
Commit e1d2cff

Author: sync-bot (committed)
Automated PR - 2026-01-06
1 parent 08f892e commit e1d2cff

91 files changed, +34036 −65447 lines changed


.gitattributes

Lines changed: 8 additions & 0 deletions
@@ -1,2 +1,10 @@
 **/*.png filter=lfs diff=lfs merge=lfs -text
 **/*.mp4 filter=lfs diff=lfs merge=lfs -text
+**/*.jpg filter=lfs diff=lfs merge=lfs -text
+
+example_workflows/low_level/end.jpg -filter -diff -merge -text
+example_workflows/low_level/fox.jpg -filter -diff -merge -text
+example_workflows/low_level/shrek2.jpg -filter -diff -merge -text
+example_workflows/low_level/start.jpg -filter -diff -merge -text
+example_workflows/tricks/shrek2.jpg -filter -diff -merge -text
+example_workflows/tricks/shrek3.jpg -filter -diff -merge -text
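
Note on these rules: the new **/*.jpg pattern routes JPEGs through Git LFS, while the per-file "-filter -diff -merge -text" entries unset those attributes again, so the listed example-workflow images stay as ordinary Git blobs. A minimal verification sketch, assuming it runs inside the repository checkout; the helper name resolved_attrs is illustrative and not part of the commit:

import subprocess


def resolved_attrs(path):
    """Ask Git which attribute values apply to path (hypothetical helper)."""
    out = subprocess.run(
        ["git", "check-attr", "filter", "diff", "merge", "text", "--", path],
        capture_output=True, text=True, check=True,
    ).stdout
    attrs = {}
    for line in out.splitlines():
        # git check-attr prints "<path>: <attribute>: <value>"
        _path, attr, value = (part.strip() for part in line.split(":", 2))
        attrs[attr] = value
    return attrs


# A .jpg covered only by the **/*.jpg rule should report filter=lfs, while the
# explicitly unset files, such as this one, should report "unset".
print(resolved_attrs("example_workflows/low_level/fox.jpg"))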

LICENSE

Lines changed: 373 additions & 193 deletions
Large diffs are not rendered by default.

README.md

Lines changed: 68 additions & 133 deletions
Large diffs are not rendered by default.

__init__.py

Lines changed: 36 additions & 7 deletions
@@ -1,16 +1,33 @@
 from .decoder_noise import DecoderNoise
+from .dynamic_conditioning import DynamicConditioning
 from .easy_samplers import (
     LinearOverlapLatentTransition,
     LTXVBaseSampler,
     LTXVExtendSampler,
     LTXVInContextSampler,
 )
-from .film_grain import LTXVFilmGrain
+from .gemma_encoder import LTXVGemmaCLIPModelLoader, LTXVGemmaEnhancePrompt
+from .gemma_encoder_mgpu_loader import LTXVGemmaCLIPModelLoaderMGPU
 from .guide import LTXVAddGuideAdvanced
-from .latent_adain import LTXVAdainLatent
-from .latent_upsampler import LTXVLatentUpsampler
-from .latents import LTXVSelectLatents, LTXVSetVideoLatentNoiseMasks
+from .guiders import GuiderParametersNode, MultimodalGuiderNode
+from .latent_norm import (
+    LTXVAdainLatent,
+    LTXVPerStepAdainPatcher,
+    LTXVPerStepStatNormPatcher,
+    LTXVStatNormLatent,
+)
+from .latents import (
+    LTXVAddLatentGuide,
+    LTXVImgToVideoConditionOnly,
+    LTXVSelectLatents,
+    LTXVSetVideoLatentNoiseMasks,
+)
 from .looping_sampler import LTXVLoopingSampler, MultiPromptProvider
+from .low_vram_loaders import (
+    LowVRAMAudioVAELoader,
+    LowVRAMCheckpointLoader,
+    LowVRAMLatentUpscaleModelLoader,
+)
 from .masks import LTXVPreprocessMasks
 from .nodes_registry import NODE_CLASS_MAPPINGS as RUNTIME_NODE_CLASS_MAPPINGS
 from .nodes_registry import (
@@ -30,21 +47,22 @@
 from .tricks import NODE_CLASS_MAPPINGS as TRICKS_NODE_CLASS_MAPPINGS
 from .tricks import NODE_DISPLAY_NAME_MAPPINGS as TRICKS_NODE_DISPLAY_NAME_MAPPINGS
 from .utiltily_nodes import ImageToCPU
-from .vae_patcher.vae_patcher import LTXVPatcherVAE
+from .vae_patcher import LTXVPatcherVAE
 
 # Static node mappings, required for ComfyUI-Manager mapping to work
 NODE_CLASS_MAPPINGS = {
     "Set VAE Decoder Noise": DecoderNoise,
     "LTXVLinearOverlapLatentTransition": LinearOverlapLatentTransition,
     "LTXVAddGuideAdvanced": LTXVAddGuideAdvanced,
+    "LTXVAddLatentGuide": LTXVAddLatentGuide,
     "LTXVAdainLatent": LTXVAdainLatent,
+    "LTXVImgToVideoConditionOnly": LTXVImgToVideoConditionOnly,
+    "LTXVPerStepAdainPatcher": LTXVPerStepAdainPatcher,
     "LTXVApplySTG": LTXVApplySTG,
     "LTXVBaseSampler": LTXVBaseSampler,
     "LTXVInContextSampler": LTXVInContextSampler,
     "LTXVExtendSampler": LTXVExtendSampler,
-    "LTXVFilmGrain": LTXVFilmGrain,
     "LTXVPreprocessMasks": LTXVPreprocessMasks,
-    "LTXVLatentUpsampler": LTXVLatentUpsampler,
     "LTXVPatcherVAE": LTXVPatcherVAE,
     "LTXVPromptEnhancer": LTXVPromptEnhancer,
     "LTXVPromptEnhancerLoader": LTXVPromptEnhancerLoader,
@@ -55,11 +73,22 @@
     "LTXVTiledSampler": LTXVTiledSampler,
     "LTXVLoopingSampler": LTXVLoopingSampler,
     "LTXVTiledVAEDecode": LTXVTiledVAEDecode,
+    "MultimodalGuider": MultimodalGuiderNode,
+    "GuiderParameters": GuiderParametersNode,
     "STGAdvancedPresets": STGAdvancedPresetsNode,
     "STGGuiderAdvanced": STGGuiderAdvancedNode,
     "STGGuiderNode": STGGuiderNode,
     "LTXVMultiPromptProvider": MultiPromptProvider,
     "ImageToCPU": ImageToCPU,
+    "LTXVStatNormLatent": LTXVStatNormLatent,
+    "LTXVPerStepStatNormPatcher": LTXVPerStepStatNormPatcher,
+    "LTXVGemmaCLIPModelLoader": LTXVGemmaCLIPModelLoader,
+    "LTXVGemmaCLIPModelLoaderMGPU": LTXVGemmaCLIPModelLoaderMGPU,
+    "LTXVGemmaEnhancePrompt": LTXVGemmaEnhancePrompt,
+    "DynamicConditioning": DynamicConditioning,
+    "LowVRAMCheckpointLoader": LowVRAMCheckpointLoader,
+    "LowVRAMAudioVAELoader": LowVRAMAudioVAELoader,
+    "LowVRAMLatentUpscaleModelLoader": LowVRAMLatentUpscaleModelLoader,
 }
 
 # Consistent display names between static and dynamic node mappings in nodes_registry.py,
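
For reference, these mappings follow ComfyUI's custom-node convention: the package's __init__.py exposes NODE_CLASS_MAPPINGS (registry key → node class) and NODE_DISPLAY_NAME_MAPPINGS (registry key → UI label), which ComfyUI reads when it imports the package. A minimal sketch of that pattern with a hypothetical node, not one of the classes added by this commit:

# Minimal registration sketch; ExampleNode is a hypothetical stand-in.
class ExampleNode:
    @classmethod
    def INPUT_TYPES(cls):
        # Declare the input sockets ComfyUI should render for this node.
        return {"required": {"model": ("MODEL",)}}

    RETURN_TYPES = ("MODEL",)
    FUNCTION = "apply"            # method ComfyUI calls when the node runs
    CATEGORY = "lightricks/LTXV"

    def apply(self, model):
        return (model,)


NODE_CLASS_MAPPINGS = {"ExampleNode": ExampleNode}
NODE_DISPLAY_NAME_MAPPINGS = {"ExampleNode": "Example Node (LTXV)"}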

dynamic_conditioning.py

Lines changed: 57 additions & 0 deletions
@@ -0,0 +1,57 @@
+import torch
+
+from .nodes_registry import comfy_node
+
+
+@comfy_node(name="DynamicConditioning")
+class DynamicConditioning:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "model": ("MODEL",),
+                "power": ("FLOAT", {"default": 1.3, "min": 1, "max": 2, "step": 0.01}),
+                "only_first_frame": ("BOOLEAN", {"default": True}),
+            }
+        }
+
+    RETURN_TYPES = ("MODEL",)
+    FUNCTION = "apply"
+    CATEGORY = "lightricks/LTXV"
+    INIT = False
+
+    def apply(self, model, power, only_first_frame):
+        self.only_first_frame = only_first_frame
+        self.power = power
+        model = model.clone()
+        model.set_model_denoise_mask_function(self.forward)
+        return (model,)
+
+    def find_step(self, sigma: torch.Tensor, step_sigmas: torch.Tensor):
+        for i, step_sigma in enumerate(step_sigmas):
+            if step_sigma <= sigma:
+                return i
+        return len(step_sigmas) - 1
+
+    def forward(
+        self, sigma: torch.Tensor, denoise_mask: torch.Tensor, extra_options: dict
+    ):
+        model = extra_options["model"]
+        step_sigmas = extra_options["sigmas"]
+        step = self.find_step(sigma, step_sigmas)
+        # Raising to power**step is equivalent to applying power once for each step taken so far
+        power = self.power**step
+        denoise_mask = denoise_mask.clone()
+        if self.only_first_frame:
+            num_channels = model.model_patcher.model.diffusion_model.in_channels
+            denoise_mask[:, :num_channels, :1] **= power
+        else:
+            denoise_mask **= power
+        # Make sure to update the denoise mask in the model, so all tokens get correct timestep values
+        for k in model.conds:
+            if "positive" in k or "negative" in k:
+                for cond in model.conds[k]:
+                    if "model_conds" in cond and "denoise_mask" in cond["model_conds"]:
+                        cond["model_conds"]["denoise_mask"].cond = denoise_mask
+        # print(f"DynamicConditioning: power: {power}, step: {step}, sigma: {sigma}, step_sigmas: {step_sigmas}")
+        return denoise_mask