Commit f5550e3

Commit message: print
1 parent 1d1248a commit f5550e3

1 file changed, +3 -0 lines changed

src/diffusers/pipelines/pipeline_utils.py

Lines changed: 3 additions & 0 deletions
@@ -990,6 +990,7 @@ def remove_all_hooks(self):
                 accelerate.hooks.remove_hook_from_module(model, recurse=True)
                 print("Done removing from the current model.")
         self._all_hooks = []
+        print("Done in remove.")

     def enable_model_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = "cuda"):
         r"""
@@ -1064,6 +1065,7 @@ def enable_model_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[t

             _, hook = cpu_offload_with_hook(model, device, prev_module_hook=hook)
             self._all_hooks.append(hook)
+        print("Initial hooks appended.")

         # CPU offload models that are not in the seq chain unless they are explicitly excluded
         # these models will stay on CPU until maybe_free_model_hooks is called
@@ -1077,6 +1079,7 @@ def enable_model_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[t
             else:
                 _, hook = cpu_offload_with_hook(model, device)
                 self._all_hooks.append(hook)
+        print("Done second time.")

     def maybe_free_model_hooks(self):
         r"""
