 from torch.utils.hooks import RemovableHandle
 from transformers import CLIPTextModel

-from ..invoke.globals import global_lora_models_dir
+from ..invoke.globals import global_lora_models_dir, Globals
 from ..invoke.devices import choose_torch_device

 """
@@ -456,10 +456,12 @@ def load_from_dict(self, state_dict): |


 class KohyaLoraManager:
-    lora_path = Path(global_lora_models_dir())
-    vector_length_cache_path = lora_path / '.vectorlength.cache'
+    lora_path = None
+    vector_length_cache_path = None

     def __init__(self, pipe):
+        self.lora_path = Path(global_lora_models_dir())
+        self.vector_length_cache_path = self.lora_path / '.vectorlength.cache'
         self.unet = pipe.unet
         self.wrapper = LoRAModuleWrapper(pipe.unet, pipe.text_encoder)
         self.text_encoder = pipe.text_encoder
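This hunk moves the evaluation of global_lora_models_dir() out of the class body and into __init__, so the LoRA path is resolved when a KohyaLoraManager is constructed rather than when the module is imported. A minimal sketch of that difference, using a hypothetical get_models_dir() in place of global_lora_models_dir():

from pathlib import Path

_models_dir = "/default/models"      # stands in for mutable global configuration

def get_models_dir() -> str:
    # Hypothetical stand-in for global_lora_models_dir().
    return _models_dir

class EagerManager:
    # Evaluated once, while the class body runs at import time.
    lora_path = Path(get_models_dir())

class LazyManager:
    def __init__(self):
        # Evaluated per instance, so it sees configuration applied after import.
        self.lora_path = Path(get_models_dir())
        self.vector_length_cache_path = self.lora_path / ".vectorlength.cache"

_models_dir = "/configured/models"   # configuration changes after import
print(EagerManager.lora_path)        # /default/models  (stale)
print(LazyManager().lora_path)       # /configured/models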
@@ -566,6 +568,7 @@ def vector_length_from_checkpoint_file(self, checkpoint_path: Path) -> int: |
 class LoraVectorLengthCache(object):
     def __init__(self, cache_path: Path):
         self.cache_path = cache_path
+        print(f'DEBUG: lock path = {Path(cache_path.parent, ".cachelock")}')
         self.lock = FileLock(Path(cache_path.parent, ".cachelock"))
         self.cache = {}

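The second hunk adds a debug print of the lock file path used by LoraVectorLengthCache; the .cachelock file sits next to the cache file in the LoRA models directory. For context, a minimal sketch of how such a file-locked cache can be read and updated, assuming a JSON on-disk format (the real serialization is not shown in this hunk) and the filelock package's FileLock:

import json
from pathlib import Path
from filelock import FileLock

class VectorLengthCacheSketch:
    # Illustrative only: mirrors the lock placement above, not the real class.
    def __init__(self, cache_path: Path):
        self.cache_path = cache_path
        # Lock file lives next to the cache file, matching the debug output.
        self.lock = FileLock(Path(cache_path.parent, ".cachelock"))
        self.cache = {}

    def get(self, name: str):
        with self.lock:  # serializes access across processes
            self._load()
            return self.cache.get(name)

    def put(self, name: str, vector_length: int):
        with self.lock:
            self._load()
            self.cache[name] = vector_length
            self.cache_path.write_text(json.dumps(self.cache))

    def _load(self):
        if self.cache_path.exists():
            self.cache = json.loads(self.cache_path.read_text())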