
Commit d659f1c
Parent: a901420
Commit message: print

1 file changed: 3 additions, 0 deletions

src/diffusers/pipelines/pipeline_utils.py

Lines changed: 3 additions & 0 deletions

@@ -1039,9 +1039,12 @@ def enable_model_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[t
         device_type = torch_device.type
         device = torch.device(f"{device_type}:{self._offload_gpu_id}")
         self._offload_device = device
+        print("Initial assignments done.")

         self.to("cpu", silence_dtype_warnings=True)
+        print("placed on CPU.")
         device_mod = getattr(torch, device.type, None)
+        print(f"{device=}")
         if hasattr(device_mod, "empty_cache") and device_mod.is_available():
             device_mod.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
             print("Empty cache called.")
