@@ -456,18 +456,25 @@ def load_from_dict(self, state_dict):
 
 
 class KohyaLoraManager:
-    lora_path = None
-    vector_length_cache_path = None
 
     def __init__(self, pipe):
-        self.lora_path = Path(global_lora_models_dir())
         self.vector_length_cache_path = self.lora_path / '.vectorlength.cache'
         self.unet = pipe.unet
         self.wrapper = LoRAModuleWrapper(pipe.unet, pipe.text_encoder)
         self.text_encoder = pipe.text_encoder
         self.device = torch.device(choose_torch_device())
         self.dtype = pipe.unet.dtype
 
+    @classmethod
+    @property
+    def lora_path(cls) -> Path:
+        return Path(global_lora_models_dir())
+
+    @classmethod
+    @property
+    def vector_length_cache_path(cls) -> Path:
+        return cls.lora_path / '.vectorlength.cache'
+
     def load_lora_module(self, name, path_file, multiplier: float = 1.0):
         print(f" | Found lora {name} at {path_file}")
         if path_file.suffix == ".safetensors":
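This hunk replaces `lora_path` and `vector_length_cache_path` values captured once at construction time with class-level properties that re-read `global_lora_models_dir()` on every access, so the paths track the current configuration and are reachable without instantiating the manager. Below is a minimal, self-contained sketch of that `@classmethod` + `@property` pattern; `_models_dir()` and `Manager` are hypothetical stand-ins for `global_lora_models_dir()` and `KohyaLoraManager`. Note that stacking the two decorators only works on Python 3.9 and 3.10; the chaining was deprecated in 3.11 and removed in 3.13.

```python
from pathlib import Path


def _models_dir() -> Path:
    # Hypothetical stand-in for global_lora_models_dir(): returns whatever
    # directory is currently configured for LoRA models.
    return Path("/tmp/lora_models")


class Manager:
    @classmethod
    @property
    def lora_path(cls) -> Path:
        # Re-resolved on each access instead of being frozen at __init__ time.
        return _models_dir()

    @classmethod
    @property
    def vector_length_cache_path(cls) -> Path:
        return cls.lora_path / ".vectorlength.cache"


# Both paths are reachable without constructing an instance.
print(Manager.lora_path)
print(Manager.vector_length_cache_path)
```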
@@ -568,7 +575,6 @@ def vector_length_from_checkpoint_file(self, checkpoint_path: Path) -> int:
 class LoraVectorLengthCache(object):
     def __init__(self, cache_path: Path):
         self.cache_path = cache_path
-        print(f'DEBUG: lock path = {Path(cache_path.parent, ".cachelock")}')
         self.lock = FileLock(Path(cache_path.parent, ".cachelock"))
         self.cache = {}
 
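`LoraVectorLengthCache` guards its cache file with a `FileLock` placed on a sibling `.cachelock` file; this hunk only drops a leftover DEBUG print. The sketch below shows the same locking pattern around a small JSON-backed cache. Only the lock placement mirrors the code above; the `load`/`save` helpers and JSON persistence are illustrative assumptions.

```python
import json
from pathlib import Path

from filelock import FileLock


class VectorLengthCache:
    def __init__(self, cache_path: Path):
        self.cache_path = cache_path
        # The lock file sits next to the cache file, as in the class above.
        self.lock = FileLock(Path(cache_path.parent, ".cachelock"))
        self.cache = {}

    def load(self) -> dict:
        # Hold the lock so a concurrent writer cannot leave a half-written file.
        with self.lock:
            if self.cache_path.exists():
                self.cache = json.loads(self.cache_path.read_text())
        return self.cache

    def save(self) -> None:
        with self.lock:
            self.cache_path.write_text(json.dumps(self.cache))
```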