@@ -2,25 +2,46 @@
 """Various utility functions needed by the loader and caching system."""

 import json
+import logging
 from pathlib import Path
 from typing import Optional

 import torch
-from diffusers import DiffusionPipeline
+from diffusers.pipelines.pipeline_utils import DiffusionPipeline
+from diffusers.schedulers.scheduling_utils import SchedulerMixin
+from transformers import CLIPTokenizer

+from invokeai.backend.ip_adapter.ip_adapter import IPAdapter
+from invokeai.backend.lora import LoRAModelRaw
 from invokeai.backend.model_manager.config import AnyModel
 from invokeai.backend.onnx.onnx_runtime import IAIOnnxRuntimeModel
+from invokeai.backend.textual_inversion import TextualInversionModelRaw


-def calc_model_size_by_data(model: AnyModel) -> int:
+def calc_model_size_by_data(logger: logging.Logger, model: AnyModel) -> int:
     """Get size of a model in memory in bytes."""
+    # TODO(ryand): We should create a CacheableModel interface for all models, and move the size calculations down to
+    # the models themselves.
     if isinstance(model, DiffusionPipeline):
         return _calc_pipeline_by_data(model)
     elif isinstance(model, torch.nn.Module):
-        return _calc_model_by_data(model)
+        return calc_module_size(model)
     elif isinstance(model, IAIOnnxRuntimeModel):
         return _calc_onnx_model_by_data(model)
+    elif isinstance(model, SchedulerMixin):
+        return 0
+    elif isinstance(model, CLIPTokenizer):
+        # TODO(ryand): Accurately calculate the tokenizer's size. It's small enough that it shouldn't matter for now.
+        return 0
+    elif isinstance(model, (TextualInversionModelRaw, IPAdapter, LoRAModelRaw)):
+        return model.calc_size()
     else:
+        # TODO(ryand): Promote this from a log to an exception once we are confident that we are handling all of the
+        # supported model types.
+        logger.error(
+            f"Failed to calculate model size for unexpected model type: {type(model)}. The model will be treated as "
+            "having size 0."
+        )
         return 0


@@ -30,11 +51,12 @@ def _calc_pipeline_by_data(pipeline: DiffusionPipeline) -> int:
     for submodel_key in pipeline.components.keys():
         submodel = getattr(pipeline, submodel_key)
         if submodel is not None and isinstance(submodel, torch.nn.Module):
-            res += _calc_model_by_data(submodel)
+            res += calc_module_size(submodel)
     return res


-def _calc_model_by_data(model: torch.nn.Module) -> int:
+def calc_module_size(model: torch.nn.Module) -> int:
+    """Calculate the size (in bytes) of a torch.nn.Module."""
     mem_params = sum([param.nelement() * param.element_size() for param in model.parameters()])
     mem_bufs = sum([buf.nelement() * buf.element_size() for buf in model.buffers()])
     mem: int = mem_params + mem_bufs  # in bytes
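
For reference, the renamed calc_module_size helper measures a module's in-memory footprint by summing nelement() * element_size() over both its parameters and its registered buffers. A minimal standalone sketch of the same arithmetic (the example module is chosen purely for illustration and is not part of the diff):

import torch

def calc_module_size(model: torch.nn.Module) -> int:
    # Bytes held by learnable parameters (weights, biases).
    mem_params = sum(p.nelement() * p.element_size() for p in model.parameters())
    # Bytes held by registered buffers (e.g. BatchNorm running statistics).
    mem_bufs = sum(b.nelement() * b.element_size() for b in model.buffers())
    return mem_params + mem_bufs

# A float32 Linear(4, 8) holds 4*8 weights + 8 biases = 40 elements,
# each 4 bytes wide, so its expected size is 160 bytes.
assert calc_module_size(torch.nn.Linear(4, 8)) == 160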