
Commit bf7dc63

skip_load_model -> force_full_load (#11390)

This should be a bit clearer and less prone to breakage if the model loading logic changes.

1 parent 86dbb89
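
In short: callers that previously passed skip_load_model=True to keep the main model out of the GPU load list now pass force_full_load=True, which always keeps the model in the list and simply forwards the flag to comfy.model_management.load_models_gpu, as the diffs below show.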

2 files changed: +5 −6


comfy/sampler_helpers.py (4 additions, 5 deletions)
@@ -122,21 +122,20 @@ def estimate_memory(model, noise_shape, conds):
     minimum_memory_required = model.model.memory_required([noise_shape[0]] + list(noise_shape[1:]), cond_shapes=cond_shapes_min)
     return memory_required, minimum_memory_required
 
-def prepare_sampling(model: ModelPatcher, noise_shape, conds, model_options=None, skip_load_model=False):
+def prepare_sampling(model: ModelPatcher, noise_shape, conds, model_options=None, force_full_load=False):
     executor = comfy.patcher_extension.WrapperExecutor.new_executor(
         _prepare_sampling,
         comfy.patcher_extension.get_all_wrappers(comfy.patcher_extension.WrappersMP.PREPARE_SAMPLING, model_options, is_model_options=True)
     )
-    return executor.execute(model, noise_shape, conds, model_options=model_options, skip_load_model=skip_load_model)
+    return executor.execute(model, noise_shape, conds, model_options=model_options, force_full_load=force_full_load)
 
-def _prepare_sampling(model: ModelPatcher, noise_shape, conds, model_options=None, skip_load_model=False):
+def _prepare_sampling(model: ModelPatcher, noise_shape, conds, model_options=None, force_full_load=False):
     real_model: BaseModel = None
     models, inference_memory = get_additional_models(conds, model.model_dtype())
     models += get_additional_models_from_model_options(model_options)
     models += model.get_nested_additional_models() # TODO: does this require inference_memory update?
     memory_required, minimum_memory_required = estimate_memory(model, noise_shape, conds)
-    models_list = [model] if not skip_load_model else []
-    comfy.model_management.load_models_gpu(models_list + models, memory_required=memory_required + inference_memory, minimum_memory_required=minimum_memory_required + inference_memory)
+    comfy.model_management.load_models_gpu([model] + models, memory_required=memory_required + inference_memory, minimum_memory_required=minimum_memory_required + inference_memory, force_full_load=force_full_load)
     real_model = model.model
 
     return real_model, conds, models
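
For illustration, a minimal sketch of a call site using the renamed parameter. The names model_patcher, noise, conds, and model_options are assumptions for the example; the prepare_sampling signature and its return values come straight from the diff above:

    import comfy.sampler_helpers

    # Hypothetical call site (model_patcher, noise, conds, and model_options
    # are assumed to exist). The main model is now always part of the list
    # handed to load_models_gpu; force_full_load is simply forwarded to it.
    real_model, conds, models = comfy.sampler_helpers.prepare_sampling(
        model_patcher,                # a comfy.model_patcher.ModelPatcher
        noise.shape,                  # shape of the latent noise tensor
        conds,                        # conditioning, as in the diff
        model_options=model_options,
        force_full_load=True,         # old API instead excluded the model via skip_load_model=True
    )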

comfy_extras/nodes_train.py (1 addition, 1 deletion)
@@ -44,7 +44,7 @@ def outer_sample(
                 noise.shape,
                 self.conds,
                 self.model_options,
-                skip_load_model=True, # skip load model as we manage it in TrainLoraNode.execute()
+                force_full_load=True, # mirror behavior in TrainLoraNode.execute() to keep model loaded
             )
         )
         device = self.model_patcher.load_device
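
Note the behavioral shift in the trainer: before this commit, skip_load_model=True left the main model out of load_models_gpu entirely because TrainLoraNode.execute() manages loading itself. Now the model is always in the load list, and force_full_load=True presumably forces a complete rather than partial load, mirroring the loaded state that TrainLoraNode.execute() establishes.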
