Skip to content

Commit a405f14

Browse files
committed
Fix SpandrelImageToImageModel size calculation for the model cache.
1 parent 9d37392 commit a405f14

File tree

2 files changed: +9 lines, −1 line

invokeai/backend/model_manager/load/model_util.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@
1515
from invokeai.backend.lora import LoRAModelRaw
1616
from invokeai.backend.model_manager.config import AnyModel
1717
from invokeai.backend.onnx.onnx_runtime import IAIOnnxRuntimeModel
18+
from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel
1819
from invokeai.backend.textual_inversion import TextualInversionModelRaw
1920

2021

@@ -33,7 +34,7 @@ def calc_model_size_by_data(logger: logging.Logger, model: AnyModel) -> int:
3334
elif isinstance(model, CLIPTokenizer):
3435
# TODO(ryand): Accurately calculate the tokenizer's size. It's small enough that it shouldn't matter for now.
3536
return 0
36-
elif isinstance(model, (TextualInversionModelRaw, IPAdapter, LoRAModelRaw)):
37+
elif isinstance(model, (TextualInversionModelRaw, IPAdapter, LoRAModelRaw, SpandrelImageToImageModel)):
3738
return model.calc_size()
3839
else:
3940
# TODO(ryand): Promote this from a log to an exception once we are confident that we are handling all of the

invokeai/backend/spandrel_image_to_image_model.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -125,3 +125,10 @@ def device(self) -> torch.device:
125125
def dtype(self) -> torch.dtype:
126126
"""The dtype of the underlying model."""
127127
return self._spandrel_model.dtype
128+
129+
def calc_size(self) -> int:
130+
"""Get size of the model in memory in bytes."""
131+
# HACK(ryand): Fix this issue with circular imports.
132+
from invokeai.backend.model_manager.load.model_util import calc_module_size
133+
134+
return calc_module_size(self._spandrel_model.model)

0 commit comments

Comments (0)