1 parent 5ebcab3 commit 1199411
comfy/model_management.py
@@ -1107,6 +1107,9 @@ def pin_memory(tensor):
     if MAX_PINNED_MEMORY <= 0:
         return False
 
+    if type(tensor) is not torch.nn.parameter.Parameter:
+        return False
+
     if not is_device_cpu(tensor.device):
         return False
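
The first added guard is an exact type() comparison rather than an isinstance() check, so only plain torch.nn.parameter.Parameter objects get past it; ordinary tensors and Parameter subclasses are declined. A small illustrative snippet (the variable names here are only for demonstration, not part of the commit):

    import torch

    w = torch.nn.Parameter(torch.randn(8), requires_grad=False)
    t = torch.randn(8)
    u = torch.nn.UninitializedParameter()

    # Exact type check, as in the hunk above: True only for plain Parameters.
    print(type(w) is torch.nn.parameter.Parameter)  # True
    print(type(t) is torch.nn.parameter.Parameter)  # False: plain tensor
    print(type(u) is torch.nn.parameter.Parameter)  # False: subclass fails the exact check
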
@@ -1116,6 +1119,9 @@ def pin_memory(tensor):
     #on the GPU async. So dont trust the CUDA API and guard here
 
+    if not tensor.is_contiguous():
+        return False
+
     size = tensor.numel() * tensor.element_size()
     if (TOTAL_PINNED_MEMORY + size) > MAX_PINNED_MEMORY:
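
Taken together, the new checks mean pin_memory() declines anything that is not a plain, contiguous CPU Parameter, or that would push usage past the MAX_PINNED_MEMORY budget. The sketch below only illustrates that gating logic and is not ComfyUI's implementation: should_pin, MAX_PINNED_BYTES and _total_pinned_bytes are hypothetical names, is_device_cpu is approximated with a device.type comparison, and the real function's pinning and TOTAL_PINNED_MEMORY bookkeeping are not shown in this diff.

    import torch

    # Hypothetical budget globals for this sketch only; the commit's
    # MAX_PINNED_MEMORY / TOTAL_PINNED_MEMORY management is not shown here.
    MAX_PINNED_BYTES = 1 << 30      # 1 GiB, illustrative
    _total_pinned_bytes = 0

    def should_pin(tensor):
        """Mirror the guards visible in the diff: pin only plain, contiguous
        CPU Parameters that still fit under the pinned-memory budget."""
        if MAX_PINNED_BYTES <= 0:
            return False
        # Only plain nn.Parameter weights pass (exact type, not isinstance).
        if type(tensor) is not torch.nn.parameter.Parameter:
            return False
        # Only CPU tensors can be page-locked (stand-in for is_device_cpu).
        if tensor.device.type != "cpu":
            return False
        # Pinning acts on the underlying storage, so skip non-contiguous views.
        if not tensor.is_contiguous():
            return False
        # Stay under the global pinned-memory cap.
        size = tensor.numel() * tensor.element_size()
        return _total_pinned_bytes + size <= MAX_PINNED_BYTES

    # Example use: take a page-locked copy only when the guards pass and an
    # accelerator is present (Tensor.pin_memory() needs one).
    p = torch.nn.Parameter(torch.randn(4, 1024), requires_grad=False)
    if torch.cuda.is_available() and should_pin(p):
        host_copy = p.detach().pin_memory()   # pinned copy for async H2D transfers
        _total_pinned_bytes += host_copy.numel() * host_copy.element_size()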