from diffusers import (
    AutoencoderKL,
    FlowMatchEulerDiscreteScheduler,
-    Lumina2Text2ImgPipeline,
+    Lumina2Pipeline,
    Lumina2Transformer2DModel,
)
from diffusers.optimization import get_scheduler
@@ -898,7 +898,7 @@ def main(args):
        cur_class_images = len(list(class_images_dir.iterdir()))

        if cur_class_images < args.num_class_images:
-            pipeline = Lumina2Text2ImgPipeline.from_pretrained(
+            pipeline = Lumina2Pipeline.from_pretrained(
                args.pretrained_model_name_or_path,
                torch_dtype=torch.bfloat16 if args.mixed_precision == "bf16" else torch.float16,
                revision=args.revision,
@@ -990,7 +990,7 @@ def main(args):
    text_encoder.to(dtype=torch.bfloat16)

    # Initialize a text encoding pipeline and keep it to CPU for now.
-    text_encoding_pipeline = Lumina2Text2ImgPipeline.from_pretrained(
+    text_encoding_pipeline = Lumina2Pipeline.from_pretrained(
        args.pretrained_model_name_or_path,
        vae=None,
        transformer=None,
@@ -1034,7 +1034,7 @@ def save_model_hook(models, weights, output_dir):
                # make sure to pop weight so that corresponding model is not saved again
                weights.pop()

-            Lumina2Text2ImgPipeline.save_lora_weights(
+            Lumina2Pipeline.save_lora_weights(
                output_dir,
                transformer_lora_layers=transformer_lora_layers_to_save,
            )
@@ -1050,7 +1050,7 @@ def load_model_hook(models, input_dir):
            else:
                raise ValueError(f"unexpected save model: {model.__class__}")

-        lora_state_dict = Lumina2Text2ImgPipeline.lora_state_dict(input_dir)
+        lora_state_dict = Lumina2Pipeline.lora_state_dict(input_dir)

        transformer_state_dict = {
            f"{k.replace('transformer.', '')}": v for k, v in lora_state_dict.items() if k.startswith("transformer.")
@@ -1473,7 +1473,7 @@ def get_sigmas(timesteps, n_dim=4, dtype=torch.float32):
        if accelerator.is_main_process:
            if args.validation_prompt is not None and epoch % args.validation_epochs == 0:
                # create pipeline
-                pipeline = Lumina2Text2ImgPipeline.from_pretrained(
+                pipeline = Lumina2Pipeline.from_pretrained(
                    args.pretrained_model_name_or_path,
                    transformer=accelerator.unwrap_model(transformer),
                    revision=args.revision,
@@ -1503,14 +1503,14 @@ def get_sigmas(timesteps, n_dim=4, dtype=torch.float32):
            transformer = transformer.to(weight_dtype)
        transformer_lora_layers = get_peft_model_state_dict(transformer)

-        Lumina2Text2ImgPipeline.save_lora_weights(
+        Lumina2Pipeline.save_lora_weights(
            save_directory=args.output_dir,
            transformer_lora_layers=transformer_lora_layers,
        )

        # Final inference
        # Load previous pipeline
-        pipeline = Lumina2Text2ImgPipeline.from_pretrained(
+        pipeline = Lumina2Pipeline.from_pretrained(
            args.pretrained_model_name_or_path,
            revision=args.revision,
            variant=args.variant,
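
For reference, a minimal inference sketch using the renamed class once training has finished; the base checkpoint id and LoRA output path below are placeholders assumed for illustration, not taken from this diff:

import torch
from diffusers import Lumina2Pipeline

# Load the base model with the renamed pipeline class.
pipeline = Lumina2Pipeline.from_pretrained(
    "Alpha-VLLM/Lumina-Image-2.0",  # assumed base checkpoint
    torch_dtype=torch.bfloat16,
)
# Load the LoRA weights saved via Lumina2Pipeline.save_lora_weights(...).
pipeline.load_lora_weights("path/to/output_dir")  # placeholder path
pipeline.to("cuda")
image = pipeline(prompt="a photo of sks dog").images[0]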