
Commit 516655b

Update model_loading.py
1 parent 67f2f6a commit 516655b

1 file changed: +1 -30 lines changed

model_loading.py

Lines changed: 1 addition & 30 deletions
@@ -36,24 +36,13 @@ def patched_write_atomic(
 
 import torch
 import torch.nn as nn
-from .utils import check_diffusers_version, remove_specific_blocks, log
-check_diffusers_version()
 
 from diffusers.models import AutoencoderKLCogVideoX
 from diffusers.schedulers import CogVideoXDDIMScheduler
 from .custom_cogvideox_transformer_3d import CogVideoXTransformer3DModel
 from .pipeline_cogvideox import CogVideoXPipeline
 from contextlib import nullcontext
 
-from .cogvideox_fun.transformer_3d import CogVideoXTransformer3DModel as CogVideoXTransformer3DModelFun
-from .cogvideox_fun.fun_pab_transformer_3d import CogVideoXTransformer3DModel as CogVideoXTransformer3DModelFunPAB
-from .cogvideox_fun.autoencoder_magvit import AutoencoderKLCogVideoX as AutoencoderKLCogVideoXFun
-
-from .cogvideox_fun.pipeline_cogvideox_inpaint import CogVideoX_Fun_Pipeline_Inpaint
-from .cogvideox_fun.pipeline_cogvideox_control import CogVideoX_Fun_Pipeline_Control
-
-from .videosys.cogvideox_transformer_3d import CogVideoXTransformer3DModel as CogVideoXTransformer3DModelPAB
-
 from accelerate import init_empty_weights
 from accelerate.utils import set_module_tensor_to_device
 
@@ -231,8 +220,6 @@ def loadmodel(self, model, precision, quantization="disabled", compile="disabled
 
         if block_edit is not None:
             transformer = remove_specific_blocks(transformer, block_edit)
-
-
 
         with open(scheduler_path) as f:
             scheduler_config = json.load(f)
@@ -274,22 +261,6 @@ def loadmodel(self, model, precision, quantization="disabled", compile="disabled
             for l in lora:
                 pipe.set_adapters(adapter_list, adapter_weights=adapter_weights)
                 if fuse:
-                    pipe.fuse_lora(lora_scale=lora[-1]["strength"] / lora_rank, components=["transformer"])
-
-            #fp8
-            if fp8_transformer == "enabled" or fp8_transformer == "fastmode":
-                for name, param in pipe.transformer.named_parameters():
-                    params_to_keep = {"patch_embed", "lora", "pos_embedding"}
-                    if not any(keyword in name for keyword in params_to_keep):
-                        param.data = param.data.to(torch.float8_e4m3fn)
-
-                if fp8_transformer == "fastmode":
-                    from .fp8_optimization import convert_fp8_linear
-                    convert_fp8_linear(pipe.transformer, dtype)
-
-            if enable_sequential_cpu_offload:
-                pipe.enable_sequential_cpu_offload()
-
             lora_scale = 1
             dimension_loras = ["orbit", "dimensionx"] # for now dimensionx loras need scaling
             if any(item in lora[-1]["path"].lower() for item in dimension_loras):
@@ -1057,4 +1028,4 @@ def loadmodel(self, model):
     "CogVideoLoraSelect": "CogVideo LoraSelect",
     "CogVideoXVAELoader": "CogVideoX VAE Loader",
     "CogVideoXModelLoader": "CogVideoX Model Loader",
-    }
+}
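
The largest removed block is the fp8 weight-cast path: it walks the transformer's parameters and casts everything to torch.float8_e4m3fn except a small keep-list of numerically sensitive names (patch embedding, positional embedding, LoRA weights). Below is a minimal standalone sketch of that pattern, assuming a plain torch.nn module rather than this repo's pipeline object; cast_to_fp8_except and its default keep-list are illustrative, not part of this codebase.

import torch
import torch.nn as nn

def cast_to_fp8_except(model: nn.Module, keep_keywords=("patch_embed", "pos_embedding", "lora")):
    # Cast parameter storage to float8_e4m3fn unless the parameter name
    # contains one of the keep keywords (mirrors the block removed above).
    for name, param in model.named_parameters():
        if not any(keyword in name for keyword in keep_keywords):
            param.data = param.data.to(torch.float8_e4m3fn)
    return model

# Example: shrink the weight storage of a toy module (requires a PyTorch
# build with float8 support, as the original code does).
model = nn.Sequential(nn.Linear(64, 64), nn.Linear(64, 8))
cast_to_fp8_except(model)
print({n: p.dtype for n, p in model.named_parameters()})

Weights stored in fp8 generally need to be upcast (or consumed by patched linear layers, which is presumably what the removed "fastmode" branch's convert_fp8_linear call provides) before they can be used in matmuls; this sketch only changes how the weights are stored.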
