@@ -23,19 +23,15 @@
 
 from ...callbacks import MultiPipelineCallbacks, PipelineCallback
 from ...image_processor import PixArtImageProcessor
-from ...loaders import SanaLoraLoaderMixin
 from ...models import AutoencoderDC, SanaTransformer2DModel
 from ...models.attention_processor import PAGCFGSanaLinearAttnProcessor2_0, PAGIdentitySanaLinearAttnProcessor2_0
 from ...schedulers import FlowMatchEulerDiscreteScheduler
 from ...utils import (
     BACKENDS_MAPPING,
-    USE_PEFT_BACKEND,
     is_bs4_available,
     is_ftfy_available,
     logging,
     replace_example_docstring,
-    scale_lora_layers,
-    unscale_lora_layers,
 )
 from ...utils.torch_utils import randn_tensor
 from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
@@ -174,7 +170,6 @@ def __init__(
             pag_attn_processors=(PAGCFGSanaLinearAttnProcessor2_0(), PAGIdentitySanaLinearAttnProcessor2_0()),
         )
 
-    # Copied from diffusers.pipelines.sana.pipeline_sana.SanaPipeline.encode_prompt
     def encode_prompt(
         self,
         prompt: Union[str, List[str]],
@@ -189,7 +184,6 @@ def encode_prompt(
         clean_caption: bool = False,
         max_sequence_length: int = 300,
         complex_human_instruction: Optional[List[str]] = None,
-        lora_scale: Optional[float] = None,
     ):
         r"""
         Encodes the prompt into text encoder hidden states.
@@ -223,15 +217,6 @@ def encode_prompt(
         if device is None:
             device = self._execution_device
 
-        # set lora scale so that monkey patched LoRA
-        # function of text encoder can correctly access it
-        if lora_scale is not None and isinstance(self, SanaLoraLoaderMixin):
-            self._lora_scale = lora_scale
-
-            # dynamically adjust the LoRA scale
-            if self.text_encoder is not None and USE_PEFT_BACKEND:
-                scale_lora_layers(self.text_encoder, lora_scale)
-
         if prompt is not None and isinstance(prompt, str):
             batch_size = 1
         elif prompt is not None and isinstance(prompt, list):
@@ -327,11 +312,6 @@ def encode_prompt(
             negative_prompt_embeds = None
             negative_prompt_attention_mask = None
 
-        if self.text_encoder is not None:
-            if isinstance(self, SanaLoraLoaderMixin) and USE_PEFT_BACKEND:
-                # Retrieve the original scale by scaling back the LoRA layers
-                unscale_lora_layers(self.text_encoder, lora_scale)
-
         return prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask
 
     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
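
For reviewers skimming the deleted hunks: the removed block is the stock PEFT LoRA scaling pattern that diffusers pipelines wrap around their text encoder, i.e. scale the LoRA layers up before encoding and divide the scale back out afterwards. A minimal sketch of that pattern, using the real `diffusers.utils` helpers but a hypothetical `encode` callable (an illustration of what this diff removes, not this pipeline's API):

```python
from diffusers.utils import USE_PEFT_BACKEND, scale_lora_layers, unscale_lora_layers


def encode_with_lora_scale(text_encoder, encode, lora_scale=None):
    # Temporarily multiply the text encoder's LoRA layer weights by
    # `lora_scale`, mirroring the block deleted above.
    if lora_scale is not None and text_encoder is not None and USE_PEFT_BACKEND:
        scale_lora_layers(text_encoder, lora_scale)
    try:
        return encode()
    finally:
        # Retrieve the original scale by scaling the LoRA layers back down.
        if lora_scale is not None and text_encoder is not None and USE_PEFT_BACKEND:
            unscale_lora_layers(text_encoder, lora_scale)
```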
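After this change, callers simply drop the `lora_scale` keyword. A hedged usage sketch, assuming the `SanaPAGPipeline` class this file defines is exported from the diffusers top level; the checkpoint id below is a placeholder, not a real repo:

```python
import torch
from diffusers import SanaPAGPipeline

# Placeholder checkpoint id, for illustration only.
pipe = SanaPAGPipeline.from_pretrained("org/sana-pag-checkpoint", torch_dtype=torch.float16)
pipe.to("cuda")

# `encode_prompt` no longer accepts `lora_scale`; the keywords below match the
# parameters visible in the hunks above.
prompt_embeds, prompt_attention_mask, negative_embeds, negative_attention_mask = pipe.encode_prompt(
    prompt="a tiny astronaut hatching from an egg on the moon",
    clean_caption=False,
    max_sequence_length=300,
)
```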