@@ -122,21 +122,20 @@ def estimate_memory(model, noise_shape, conds):
122122 minimum_memory_required = model .model .memory_required ([noise_shape [0 ]] + list (noise_shape [1 :]), cond_shapes = cond_shapes_min )
123123 return memory_required , minimum_memory_required
124124
def prepare_sampling(model: ModelPatcher, noise_shape, conds, model_options=None, force_full_load=False):
    """Prepare *model* for sampling, routing through PREPARE_SAMPLING wrappers.

    Collects all PREPARE_SAMPLING wrappers registered in *model_options* and
    runs :func:`_prepare_sampling` through them, forwarding every argument
    (including *force_full_load*) unchanged.
    """
    wrappers = comfy.patcher_extension.get_all_wrappers(
        comfy.patcher_extension.WrappersMP.PREPARE_SAMPLING, model_options, is_model_options=True
    )
    runner = comfy.patcher_extension.WrapperExecutor.new_executor(_prepare_sampling, wrappers)
    return runner.execute(model, noise_shape, conds, model_options=model_options, force_full_load=force_full_load)
def _prepare_sampling(model: ModelPatcher, noise_shape, conds, model_options=None, force_full_load=False):
    """Load *model* plus any additional models onto the GPU and return them.

    Gathers extra models implied by the conditioning, the model options, and
    the patcher's nested models, estimates the memory the sampling run needs,
    and asks model management to load everything (passing *force_full_load*
    through). Returns ``(real_model, conds, extra_models)``.
    """
    extra_models, inference_memory = get_additional_models(conds, model.model_dtype())
    extra_models += get_additional_models_from_model_options(model_options)
    extra_models += model.get_nested_additional_models()  # TODO: does this require inference_memory update?
    memory_required, minimum_memory_required = estimate_memory(model, noise_shape, conds)
    comfy.model_management.load_models_gpu(
        [model] + extra_models,
        memory_required=memory_required + inference_memory,
        minimum_memory_required=minimum_memory_required + inference_memory,
        force_full_load=force_full_load,
    )
    real_model: BaseModel = model.model
    return real_model, conds, extra_models