
Commit 334e74b

Merge branch 'master' into portable-manager-update
2 parents f19a2b5 + cabc4d3

109 files changed (+8514, -3250 lines)


.ci/update_windows/update.py

Lines changed: 5 additions & 2 deletions
@@ -66,8 +66,10 @@ def pull(repo, remote_name='origin', branch='master'):
 try:
     ref = repo.lookup_reference('refs/remotes/origin/master')
 except:
-    print("pulling.") # noqa: T201
-    pull(repo)
+    print("fetching.") # noqa: T201
+    for remote in repo.remotes:
+        if remote.name == "origin":
+            remote.fetch()
     ref = repo.lookup_reference('refs/remotes/origin/master')
 repo.checkout(ref)
 branch = repo.lookup_branch('master')
@@ -170,3 +172,4 @@ def file_size(f):
         shutil.copy(stable_update_script, stable_update_script_to)
     except:
         pass
+
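The fallback above now fetches the origin remote instead of calling the higher-level pull(). A minimal standalone sketch of the same pattern, assuming pygit2 (the library whose API this script uses) and a hypothetical refresh_master_ref helper:

```python
import pygit2

def refresh_master_ref(repo_path: str) -> pygit2.Reference:
    repo = pygit2.Repository(repo_path)
    try:
        return repo.lookup_reference('refs/remotes/origin/master')
    except KeyError:
        # Ref missing: fetch "origin" to create/update the remote-tracking
        # branch, without merging anything into the local checkout.
        for remote in repo.remotes:
            if remote.name == "origin":
                remote.fetch()
        return repo.lookup_reference('refs/remotes/origin/master')
```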

.ci/windows_amd_base_files/README_VERY_IMPORTANT.txt

Lines changed: 3 additions & 2 deletions
@@ -1,5 +1,5 @@
-As of the time of writing this you need this preview driver for best results:
-https://www.amd.com/en/resources/support-articles/release-notes/RN-AMDGPU-WINDOWS-PYTORCH-PREVIEW.html
+As of the time of writing this you need this driver for best results:
+https://www.amd.com/en/resources/support-articles/release-notes/RN-AMDGPU-WINDOWS-PYTORCH-7-1-1.html
 
 HOW TO RUN:
 
@@ -25,3 +25,4 @@ In the ComfyUI directory you will find a file: extra_model_paths.yaml.example
 Rename this file to: extra_model_paths.yaml and edit it with your favorite text editor.
 
 
+

.github/workflows/release-stable-all.yml

Lines changed: 2 additions & 2 deletions
@@ -65,11 +65,11 @@ jobs:
       contents: "write"
       packages: "write"
       pull-requests: "read"
-    name: "Release AMD ROCm 6.4.4"
+    name: "Release AMD ROCm 7.1.1"
     uses: ./.github/workflows/stable-release.yml
     with:
      git_tag: ${{ inputs.git_tag }}
-      cache_tag: "rocm644"
+      cache_tag: "rocm711"
       python_minor: "12"
       python_patch: "10"
       rel_name: "amd"

CODEOWNERS

Lines changed: 1 addition & 2 deletions
@@ -1,3 +1,2 @@
 # Admins
-* @comfyanonymous
-* @kosinkadink
+* @comfyanonymous @kosinkadink @guill

README.md

Lines changed: 28 additions & 0 deletions
@@ -68,6 +68,7 @@ See what ComfyUI can do with the [example workflows](https://comfyanonymous.gith
 - [Qwen Image](https://comfyanonymous.github.io/ComfyUI_examples/qwen_image/)
 - [Hunyuan Image 2.1](https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_image/)
 - [Flux 2](https://comfyanonymous.github.io/ComfyUI_examples/flux2/)
+- [Z Image](https://comfyanonymous.github.io/ComfyUI_examples/z_image/)
 - Image Editing Models
 - [Omnigen 2](https://comfyanonymous.github.io/ComfyUI_examples/omnigen/)
 - [Flux Kontext](https://comfyanonymous.github.io/ComfyUI_examples/flux/#flux-kontext-image-editing-model)
@@ -80,6 +81,7 @@ See what ComfyUI can do with the [example workflows](https://comfyanonymous.gith
 - [Hunyuan Video](https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_video/)
 - [Wan 2.1](https://comfyanonymous.github.io/ComfyUI_examples/wan/)
 - [Wan 2.2](https://comfyanonymous.github.io/ComfyUI_examples/wan22/)
+- [Hunyuan Video 1.5](https://docs.comfy.org/tutorials/video/hunyuan/hunyuan-video-1-5)
 - Audio Models
 - [Stable Audio](https://comfyanonymous.github.io/ComfyUI_examples/audio/)
 - [ACE Step](https://comfyanonymous.github.io/ComfyUI_examples/audio/)
@@ -318,6 +320,32 @@ For models compatible with Iluvatar Extension for PyTorch. Here's a step-by-step
 1. Install the Iluvatar Corex Toolkit by adhering to the platform-specific instructions on the [Installation](https://support.iluvatar.com/#/DocumentCentre?id=1&nameCenter=2&productId=520117912052801536)
 2. Launch ComfyUI by running `python main.py`
 
+
+## [ComfyUI-Manager](https://github.com/Comfy-Org/ComfyUI-Manager/tree/manager-v4)
+
+**ComfyUI-Manager** is an extension that allows you to easily install, update, and manage custom nodes for ComfyUI.
+
+### Setup
+
+1. Install the manager dependencies:
+```bash
+pip install -r manager_requirements.txt
+```
+
+2. Enable the manager with the `--enable-manager` flag when running ComfyUI:
+```bash
+python main.py --enable-manager
+```
+
+### Command Line Options
+
+| Flag | Description |
+|------|-------------|
+| `--enable-manager` | Enable ComfyUI-Manager |
+| `--enable-manager-legacy-ui` | Use the legacy manager UI instead of the new UI (requires `--enable-manager`) |
+| `--disable-manager-ui` | Disable the manager UI and endpoints while keeping background features like security checks and scheduled installation completion (requires `--enable-manager`) |
+
+
 # Running
 
 ```python main.py```
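For reference, a few launch lines showing how the manager flags documented in the table above combine (commands only; assumes a standard ComfyUI checkout with the manager dependencies installed):

```bash
# Manager enabled with the new UI (typical interactive use)
python main.py --enable-manager

# Manager enabled, but with the legacy manager UI
python main.py --enable-manager --enable-manager-legacy-ui

# Manager enabled headless: UI and endpoints off, background features such as
# security checks and scheduled installation completion stay active
python main.py --enable-manager --disable-manager-ui
```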

app/user_manager.py

Lines changed: 17 additions & 6 deletions
@@ -59,22 +59,26 @@ def get_request_user_id(self, request):
         user = "default"
         if args.multi_user and "comfy-user" in request.headers:
             user = request.headers["comfy-user"]
+            # Block System Users (use same error message to prevent probing)
+            if user.startswith(folder_paths.SYSTEM_USER_PREFIX):
+                raise KeyError("Unknown user: " + user)
 
         if user not in self.users:
             raise KeyError("Unknown user: " + user)
 
         return user
 
     def get_request_user_filepath(self, request, file, type="userdata", create_dir=True):
-        user_directory = folder_paths.get_user_directory()
-
         if type == "userdata":
-            root_dir = user_directory
+            root_dir = folder_paths.get_user_directory()
         else:
             raise KeyError("Unknown filepath type:" + type)
 
         user = self.get_request_user_id(request)
-        path = user_root = os.path.abspath(os.path.join(root_dir, user))
+        user_root = folder_paths.get_public_user_directory(user)
+        if user_root is None:
+            return None
+        path = user_root
 
         # prevent leaving /{type}
         if os.path.commonpath((root_dir, user_root)) != root_dir:
@@ -101,7 +105,11 @@ def add_user(self, name):
         name = name.strip()
         if not name:
             raise ValueError("username not provided")
+        if name.startswith(folder_paths.SYSTEM_USER_PREFIX):
+            raise ValueError("System User prefix not allowed")
         user_id = re.sub("[^a-zA-Z0-9-_]+", '-', name)
+        if user_id.startswith(folder_paths.SYSTEM_USER_PREFIX):
+            raise ValueError("System User prefix not allowed")
         user_id = user_id + "_" + str(uuid.uuid4())
 
         self.users[user_id] = name
@@ -132,7 +140,10 @@ async def post_users(request):
             if username in self.users.values():
                 return web.json_response({"error": "Duplicate username."}, status=400)
 
-            user_id = self.add_user(username)
+            try:
+                user_id = self.add_user(username)
+            except ValueError as e:
+                return web.json_response({"error": str(e)}, status=400)
             return web.json_response(user_id)
 
         @routes.get("/userdata")
@@ -424,7 +435,7 @@ async def move_userdata(request):
                 return source
 
             dest = get_user_data_path(request, check_exists=False, param="dest")
-            if not isinstance(source, str):
+            if not isinstance(dest, str):
                 return dest
 
             overwrite = request.query.get("overwrite", 'true') != "false"
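The get_request_user_id change above deliberately reuses the generic "Unknown user" message so a caller cannot probe for system users. A small illustration of that idea, with SYSTEM_USER_PREFIX stubbed in as a placeholder (the real constant lives in folder_paths and its value is not part of this diff):

```python
SYSTEM_USER_PREFIX = "system_"  # placeholder value for illustration only

def resolve_user(requested: str, known_users: dict) -> str:
    if requested.startswith(SYSTEM_USER_PREFIX):
        # Same message as the "not found" case below, so the response does not
        # reveal whether a system user with this name actually exists.
        raise KeyError("Unknown user: " + requested)
    if requested not in known_users:
        raise KeyError("Unknown user: " + requested)
    return requested
```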

comfy/cli_args.py

Lines changed: 2 additions & 1 deletion
@@ -137,7 +137,8 @@ class LatentPreviewMethod(enum.Enum):
 
 parser.add_argument("--reserve-vram", type=float, default=None, help="Set the amount of vram in GB you want to reserve for use by your OS/other software. By default some amount is reserved depending on your OS.")
 
-parser.add_argument("--async-offload", action="store_true", help="Use async weight offloading.")
+parser.add_argument("--async-offload", nargs='?', const=2, type=int, default=None, metavar="NUM_STREAMS", help="Use async weight offloading. An optional argument controls the amount of offload streams. Default is 2. Enabled by default on Nvidia.")
+parser.add_argument("--disable-async-offload", action="store_true", help="Disable async weight offloading.")
 
 parser.add_argument("--force-non-blocking", action="store_true", help="Force ComfyUI to use non-blocking operations for all applicable tensors. This may improve performance on some non-Nvidia systems but can cause issues with some workflows.")
 
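With nargs='?' and const=2, the flag can now be passed bare or with a stream count; rough usage, with the semantics taken from the help strings above:

```bash
python main.py --async-offload          # enable async offload with the default of 2 streams
python main.py --async-offload 4        # enable async offload with 4 streams
python main.py --disable-async-offload  # opt out on platforms where it is enabled by default
```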

comfy/context_windows.py

Lines changed: 90 additions & 14 deletions
@@ -51,26 +51,36 @@ def execute(self, calc_cond_batch: Callable, model: BaseModel, conds: list[list[
 
 
 class IndexListContextWindow(ContextWindowABC):
-    def __init__(self, index_list: list[int], dim: int=0):
+    def __init__(self, index_list: list[int], dim: int=0, total_frames: int=0):
         self.index_list = index_list
         self.context_length = len(index_list)
         self.dim = dim
+        self.total_frames = total_frames
+        self.center_ratio = (min(index_list) + max(index_list)) / (2 * total_frames)
 
-    def get_tensor(self, full: torch.Tensor, device=None, dim=None) -> torch.Tensor:
+    def get_tensor(self, full: torch.Tensor, device=None, dim=None, retain_index_list=[]) -> torch.Tensor:
         if dim is None:
             dim = self.dim
         if dim == 0 and full.shape[dim] == 1:
             return full
-        idx = [slice(None)] * dim + [self.index_list]
-        return full[idx].to(device)
+        idx = tuple([slice(None)] * dim + [self.index_list])
+        window = full[idx]
+        if retain_index_list:
+            idx = tuple([slice(None)] * dim + [retain_index_list])
+            window[idx] = full[idx]
+        return window.to(device)
 
     def add_window(self, full: torch.Tensor, to_add: torch.Tensor, dim=None) -> torch.Tensor:
         if dim is None:
             dim = self.dim
-        idx = [slice(None)] * dim + [self.index_list]
+        idx = tuple([slice(None)] * dim + [self.index_list])
         full[idx] += to_add
         return full
 
+    def get_region_index(self, num_regions: int) -> int:
+        region_idx = int(self.center_ratio * num_regions)
+        return min(max(region_idx, 0), num_regions - 1)
+
 
 class IndexListCallbacks:
     EVALUATE_CONTEXT_WINDOWS = "evaluate_context_windows"
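A worked example of the new center_ratio / get_region_index logic (illustrative numbers, using the IndexListContextWindow class as defined in this diff): a window covering frames 40-60 of an 80-frame latent has center ratio (40 + 60) / (2 * 80) = 0.625, so with 4 available conds it maps to region int(0.625 * 4) = 2.

```python
window = IndexListContextWindow(list(range(40, 61)), dim=0, total_frames=80)
print(window.center_ratio)         # 0.625
print(window.get_region_index(4))  # 2
```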
@@ -94,7 +104,8 @@ class ContextFuseMethod:
 
 ContextResults = collections.namedtuple("ContextResults", ['window_idx', 'sub_conds_out', 'sub_conds', 'window'])
 class IndexListContextHandler(ContextHandlerABC):
-    def __init__(self, context_schedule: ContextSchedule, fuse_method: ContextFuseMethod, context_length: int=1, context_overlap: int=0, context_stride: int=1, closed_loop=False, dim=0):
+    def __init__(self, context_schedule: ContextSchedule, fuse_method: ContextFuseMethod, context_length: int=1, context_overlap: int=0, context_stride: int=1,
+                 closed_loop: bool=False, dim:int=0, freenoise: bool=False, cond_retain_index_list: list[int]=[], split_conds_to_windows: bool=False):
         self.context_schedule = context_schedule
         self.fuse_method = fuse_method
         self.context_length = context_length
@@ -103,13 +114,18 @@ def __init__(self, context_schedule: ContextSchedule, fuse_method: ContextFuseMe
         self.closed_loop = closed_loop
         self.dim = dim
         self._step = 0
+        self.freenoise = freenoise
+        self.cond_retain_index_list = [int(x.strip()) for x in cond_retain_index_list.split(",")] if cond_retain_index_list else []
+        self.split_conds_to_windows = split_conds_to_windows
 
         self.callbacks = {}
 
     def should_use_context(self, model: BaseModel, conds: list[list[dict]], x_in: torch.Tensor, timestep: torch.Tensor, model_options: dict[str]) -> bool:
         # for now, assume first dim is batch - should have stored on BaseModel in actual implementation
         if x_in.size(self.dim) > self.context_length:
-            logging.info(f"Using context windows {self.context_length} for {x_in.size(self.dim)} frames.")
+            logging.info(f"Using context windows {self.context_length} with overlap {self.context_overlap} for {x_in.size(self.dim)} frames.")
+            if self.cond_retain_index_list:
+                logging.info(f"Retaining original cond for indexes: {self.cond_retain_index_list}")
             return True
         return False
 
@@ -123,6 +139,11 @@ def get_resized_cond(self, cond_in: list[dict], x_in: torch.Tensor, window: Inde
             return None
         # reuse or resize cond items to match context requirements
         resized_cond = []
+        # if multiple conds, split based on primary region
+        if self.split_conds_to_windows and len(cond_in) > 1:
+            region = window.get_region_index(len(cond_in))
+            logging.info(f"Splitting conds to windows; using region {region} for window {window[0]}-{window[-1]} with center ratio {window.center_ratio:.3f}")
+            cond_in = [cond_in[region]]
         # cond object is a list containing a dict - outer list is irrelevant, so just loop through it
         for actual_cond in cond_in:
             resized_actual_cond = actual_cond.copy()
@@ -146,12 +167,19 @@ def get_resized_cond(self, cond_in: list[dict], x_in: torch.Tensor, window: Inde
                 # when in dictionary, look for tensors and CONDCrossAttn [comfy/conds.py] (has cond attr that is a tensor)
                 for cond_key, cond_value in new_cond_item.items():
                     if isinstance(cond_value, torch.Tensor):
-                        if cond_value.ndim < self.dim and cond_value.size(0) == x_in.size(self.dim):
+                        if (self.dim < cond_value.ndim and cond_value.size(self.dim) == x_in.size(self.dim)) or \
+                                (cond_value.ndim < self.dim and cond_value.size(0) == x_in.size(self.dim)):
                             new_cond_item[cond_key] = window.get_tensor(cond_value, device)
+                    # Handle audio_embed (temporal dim is 1)
+                    elif cond_key == "audio_embed" and hasattr(cond_value, "cond") and isinstance(cond_value.cond, torch.Tensor):
+                        audio_cond = cond_value.cond
+                        if audio_cond.ndim > 1 and audio_cond.size(1) == x_in.size(self.dim):
+                            new_cond_item[cond_key] = cond_value._copy_with(window.get_tensor(audio_cond, device, dim=1))
                     # if has cond that is a Tensor, check if needs to be subset
                     elif hasattr(cond_value, "cond") and isinstance(cond_value.cond, torch.Tensor):
-                        if cond_value.cond.ndim < self.dim and cond_value.cond.size(0) == x_in.size(self.dim):
-                            new_cond_item[cond_key] = cond_value._copy_with(window.get_tensor(cond_value.cond, device))
+                        if (self.dim < cond_value.cond.ndim and cond_value.cond.size(self.dim) == x_in.size(self.dim)) or \
+                                (cond_value.cond.ndim < self.dim and cond_value.cond.size(0) == x_in.size(self.dim)):
+                            new_cond_item[cond_key] = cond_value._copy_with(window.get_tensor(cond_value.cond, device, retain_index_list=self.cond_retain_index_list))
                     elif cond_key == "num_video_frames": # for SVD
                         new_cond_item[cond_key] = cond_value._copy_with(cond_value.cond)
                         new_cond_item[cond_key].cond = window.context_length
@@ -164,7 +192,7 @@ def get_resized_cond(self, cond_in: list[dict], x_in: torch.Tensor, window: Inde
         return resized_cond
 
     def set_step(self, timestep: torch.Tensor, model_options: dict[str]):
-        mask = torch.isclose(model_options["transformer_options"]["sample_sigmas"], timestep, rtol=0.0001)
+        mask = torch.isclose(model_options["transformer_options"]["sample_sigmas"], timestep[0], rtol=0.0001)
         matches = torch.nonzero(mask)
         if torch.numel(matches) == 0:
             raise Exception("No sample_sigmas matched current timestep; something went wrong.")
@@ -173,7 +201,7 @@ def set_step(self, timestep: torch.Tensor, model_options: dict[str]):
     def get_context_windows(self, model: BaseModel, x_in: torch.Tensor, model_options: dict[str]) -> list[IndexListContextWindow]:
         full_length = x_in.size(self.dim) # TODO: choose dim based on model
         context_windows = self.context_schedule.func(full_length, self, model_options)
-        context_windows = [IndexListContextWindow(window, dim=self.dim) for window in context_windows]
+        context_windows = [IndexListContextWindow(window, dim=self.dim, total_frames=full_length) for window in context_windows]
         return context_windows
 
     def execute(self, calc_cond_batch: Callable, model: BaseModel, conds: list[list[dict]], x_in: torch.Tensor, timestep: torch.Tensor, model_options: dict[str]):
@@ -250,8 +278,8 @@ def combine_context_window_results(self, x_in: torch.Tensor, sub_conds_out, sub_
                 prev_weight = (bias_total / (bias_total + bias))
                 new_weight = (bias / (bias_total + bias))
                 # account for dims of tensors
-                idx_window = [slice(None)] * self.dim + [idx]
-                pos_window = [slice(None)] * self.dim + [pos]
+                idx_window = tuple([slice(None)] * self.dim + [idx])
+                pos_window = tuple([slice(None)] * self.dim + [pos])
                 # apply new values
                 conds_final[i][idx_window] = conds_final[i][idx_window] * prev_weight + sub_conds_out[i][pos_window] * new_weight
                 biases_final[i][idx] = bias_total + bias
@@ -287,6 +315,28 @@ def create_prepare_sampling_wrapper(model: ModelPatcher):
     )
 
 
+def _sampler_sample_wrapper(executor, guider, sigmas, extra_args, callback, noise, *args, **kwargs):
+    model_options = extra_args.get("model_options", None)
+    if model_options is None:
+        raise Exception("model_options not found in sampler_sample_wrapper; this should never happen, something went wrong.")
+    handler: IndexListContextHandler = model_options.get("context_handler", None)
+    if handler is None:
+        raise Exception("context_handler not found in sampler_sample_wrapper; this should never happen, something went wrong.")
+    if not handler.freenoise:
+        return executor(guider, sigmas, extra_args, callback, noise, *args, **kwargs)
+    noise = apply_freenoise(noise, handler.dim, handler.context_length, handler.context_overlap, extra_args["seed"])
+
+    return executor(guider, sigmas, extra_args, callback, noise, *args, **kwargs)
+
+
+def create_sampler_sample_wrapper(model: ModelPatcher):
+    model.add_wrapper_with_key(
+        comfy.patcher_extension.WrappersMP.SAMPLER_SAMPLE,
+        "ContextWindows_sampler_sample",
+        _sampler_sample_wrapper
+    )
+
+
 def match_weights_to_dim(weights: list[float], x_in: torch.Tensor, dim: int, device=None) -> torch.Tensor:
     total_dims = len(x_in.shape)
     weights_tensor = torch.Tensor(weights).to(device=device)
@@ -538,3 +588,29 @@ def shift_window_to_end(window: list[int], num_frames: int):
     for i in range(len(window)):
         # 2) add end_delta to each val to slide windows to end
         window[i] = window[i] + end_delta
+
+
+# https://github.com/Kosinkadink/ComfyUI-AnimateDiff-Evolved/blob/90fb1331201a4b29488089e4fbffc0d82cc6d0a9/animatediff/sample_settings.py#L465
+def apply_freenoise(noise: torch.Tensor, dim: int, context_length: int, context_overlap: int, seed: int):
+    logging.info("Context windows: Applying FreeNoise")
+    generator = torch.Generator(device='cpu').manual_seed(seed)
+    latent_video_length = noise.shape[dim]
+    delta = context_length - context_overlap
+
+    for start_idx in range(0, latent_video_length - context_length, delta):
+        place_idx = start_idx + context_length
+
+        actual_delta = min(delta, latent_video_length - place_idx)
+        if actual_delta <= 0:
+            break
+
+        list_idx = torch.randperm(actual_delta, generator=generator, device='cpu') + start_idx
+
+        source_slice = [slice(None)] * noise.ndim
+        source_slice[dim] = list_idx
+        target_slice = [slice(None)] * noise.ndim
+        target_slice[dim] = slice(place_idx, place_idx + actual_delta)
+
+        noise[tuple(target_slice)] = noise[tuple(source_slice)]
+
+    return noise
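A quick self-contained check of the FreeNoise helper above (assumes apply_freenoise is importable from comfy.context_windows): with 16 frames on dim 0, a context length of 8, and an overlap of 4, frames past the first window are overwritten with shuffled copies of earlier frames, while the first window itself is left untouched.

```python
import torch
from comfy.context_windows import apply_freenoise

noise = torch.randn(16, 4, 8, 8)  # 16 "frames" along dim 0
out = apply_freenoise(noise.clone(), dim=0, context_length=8, context_overlap=4, seed=0)

print(torch.equal(out[:8], noise[:8]))  # True: frames 0-7 are unchanged
# Frames 8-11 are a shuffled copy of frames 0-3; frames 12-15 of frames 4-7.
```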
