```diff
@@ -30,12 +30,6 @@
 from omegaconf import OmegaConf
 from omegaconf.dictconfig import DictConfig
 from picklescan.scanner import scan_file_path
-
-from .ckpt_to_diffuser import (
-    load_pipeline_from_original_stable_diffusion_ckpt,
-    create_vae_diffusers_config,
-    convert_ldm_vae_checkpoint,
-    )
 from ldm.invoke.devices import CPU_DEVICE
 from ldm.invoke.generator.diffusers_pipeline import StableDiffusionGeneratorPipeline
 from ldm.invoke.globals import Globals, global_cache_dir
```
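The `ckpt_to_diffuser` imports removed here are not gone; the hunks below re-introduce them as function-local imports at their points of use. See the sketch after the next hunk for the general pattern.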
```diff
@@ -374,7 +368,10 @@ def _load_ckpt_model(self, model_name, mconfig):
         print(
             f">> Converting legacy checkpoint {model_name} into a diffusers model..."
         )
-
+        from .ckpt_to_diffuser import (
+            load_pipeline_from_original_stable_diffusion_ckpt,
+        )
+
         if self._has_cuda():
             torch.cuda.empty_cache()
         pipeline = load_pipeline_from_original_stable_diffusion_ckpt(
```
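Importing `ckpt_to_diffuser` inside `_load_ckpt_model` instead of at module scope means the conversion machinery (and everything it transitively imports) is only loaded when a legacy checkpoint actually needs converting, and it sidesteps circular-import failures between the two modules. Python caches the module in `sys.modules` on first use, so repeated calls pay the import cost once. A minimal sketch of the deferred-import pattern, with a hypothetical module name standing in for the real one:

```python
def convert_legacy_checkpoint(ckpt_path: str):
    """Convert a checkpoint, importing the converter lazily."""
    # Deferred import: `heavy_converter` (hypothetical) is loaded on the
    # first call rather than when this module is imported. Python stores
    # it in sys.modules, so later calls reuse the loaded module for free.
    from heavy_converter import convert  # hypothetical module

    return convert(ckpt_path)
```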
```diff
@@ -1287,17 +1284,28 @@ def _load_vae(self, vae_config) -> AutoencoderKL:
 
         return vae
 
+    @staticmethod
     def convert_vae(vae_path: Union[Path,str])->AutoencoderKL:
+        print(f" | A checkpoint VAE was detected. Converting to diffusers format.")
+        vae_path = Path(Globals.root,vae_path).resolve()
+
+        from .ckpt_to_diffuser import (
+            create_vae_diffusers_config,
+            convert_ldm_vae_state_dict,
+        )
+
         vae_path = Path(vae_path)
         if vae_path.suffix in ['.pt','.ckpt']:
-            vae_state_dict = torch.load(vae_path)
+            vae_state_dict = torch.load(vae_path, map_location="cpu")
         else:
             vae_state_dict = safetensors.torch.load_file(vae_path)
+        if 'state_dict' in vae_state_dict:
+            vae_state_dict = vae_state_dict['state_dict']
         # TODO: see if this works with 1.x inpaint models and 2.x models
         config_file_path = Path(Globals.root,"configs/stable-diffusion/v1-inference.yaml")
         original_conf = OmegaConf.load(config_file_path)
         vae_config = create_vae_diffusers_config(original_conf, image_size=512) # TODO: fix
-        diffusers_vae = convert_ldm_vae_checkpoint(vae_state_dict,vae_config)
+        diffusers_vae = convert_ldm_vae_state_dict(vae_state_dict,vae_config)
         vae = AutoencoderKL(**vae_config)
         vae.load_state_dict(diffusers_vae)
         return vae
```
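`convert_vae` now loads `.pt`/`.ckpt` weights with `map_location="cpu"`, which keeps `torch.load` from restoring tensors onto whatever GPU they were saved from (a hard failure on CPU-only machines), and it unwraps the `state_dict` key that Lightning-style training checkpoints wrap their weights in before the LDM keys are remapped into diffusers' `AutoencoderKL` layout. Because the method is now a `@staticmethod`, it can be called without a manager instance. A hedged usage sketch follows; the enclosing `ModelManager` class, its module path, and the checkpoint filename are assumptions, since the diff doesn't show them:

```python
# Usage sketch. The class name, module path, and checkpoint filename are
# assumptions for illustration; only convert_vae itself appears in the diff.
from ldm.invoke.model_manager import ModelManager  # assumed location

# A relative path is resolved against Globals.root inside convert_vae.
vae = ModelManager.convert_vae("models/ldm/stable-diffusion-v1/vae.ckpt")
print(type(vae).__name__)  # -> "AutoencoderKL", with weights loaded on the CPU
```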