@@ -346,7 +346,7 @@ def test_with_norm_in_state_dict(self):

         _, _, inputs = self.get_dummy_inputs(with_generator=False)

-        logger = logging.get_logger("diffusers.loaders.lora_pipeline")
+        logger = logging.get_logger("diffusers.pipelines.flux.lora_utils")
         logger.setLevel(logging.INFO)

         original_output = pipe(**inputs, generator=torch.manual_seed(0))[0]
@@ -397,7 +397,7 @@ def test_lora_parameter_expanded_shapes(self):
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
         original_out = pipe(**inputs, generator=torch.manual_seed(0))[0]

-        logger = logging.get_logger("diffusers.loaders.lora_pipeline")
+        logger = logging.get_logger("diffusers.pipelines.flux.lora_utils")
         logger.setLevel(logging.DEBUG)

         # Change the transformer config to mimic a real use case.
@@ -480,7 +480,7 @@ def test_normal_lora_with_expanded_lora_raises_error(self):
         pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)

-        logger = logging.get_logger("diffusers.loaders.lora_pipeline")
+        logger = logging.get_logger("diffusers.pipelines.flux.lora_utils")
         logger.setLevel(logging.DEBUG)

         out_features, in_features = pipe.transformer.x_embedder.weight.shape
@@ -535,7 +535,7 @@ def test_normal_lora_with_expanded_lora_raises_error(self):
535535 pipe = pipe .to (torch_device )
536536 pipe .set_progress_bar_config (disable = None )
537537
538- logger = logging .get_logger ("diffusers.loaders.lora_pipeline " )
538+ logger = logging .get_logger ("diffusers.pipelines.flux.lora_utils " )
539539 logger .setLevel (logging .DEBUG )
540540
541541 out_features , in_features = pipe .transformer .x_embedder .weight .shape
@@ -584,7 +584,7 @@ def test_fuse_expanded_lora_with_regular_lora(self):
         pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)

-        logger = logging.get_logger("diffusers.loaders.lora_pipeline")
+        logger = logging.get_logger("diffusers.pipelines.flux.lora_utils")
         logger.setLevel(logging.DEBUG)

         out_features, in_features = pipe.transformer.x_embedder.weight.shape
@@ -647,7 +647,7 @@ def test_load_regular_lora(self):
             "transformer.x_embedder.lora_B.weight": normal_lora_B.weight,
         }

-        logger = logging.get_logger("diffusers.loaders.lora_pipeline")
+        logger = logging.get_logger("diffusers.pipelines.flux.lora_utils")
         logger.setLevel(logging.INFO)
         with CaptureLogger(logger) as cap_logger:
             pipe.load_lora_weights(lora_state_dict, "adapter-1")
@@ -662,7 +662,7 @@ def test_load_regular_lora(self):
     def test_lora_unload_with_parameter_expanded_shapes(self):
         components, _, _ = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)

-        logger = logging.get_logger("diffusers.loaders.lora_pipeline")
+        logger = logging.get_logger("diffusers.pipelines.flux.lora_utils")
         logger.setLevel(logging.DEBUG)

         # Change the transformer config to mimic a real use case.
@@ -728,7 +728,7 @@ def test_lora_unload_with_parameter_expanded_shapes(self):
     def test_lora_unload_with_parameter_expanded_shapes_and_no_reset(self):
         components, _, _ = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)

-        logger = logging.get_logger("diffusers.loaders.lora_pipeline")
+        logger = logging.get_logger("diffusers.pipelines.flux.lora_utils")
         logger.setLevel(logging.DEBUG)

         # Change the transformer config to mimic a real use case.
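Note: every hunk above makes the same one-line change, repointing the captured logger from diffusers.loaders.lora_pipeline to diffusers.pipelines.flux.lora_utils so the tests listen to the module that now emits the messages. A minimal sketch of the CaptureLogger pattern these tests rely on, assuming the diffusers.utils logging module and the CaptureLogger test helper that appear in the hunks; the logger.debug call is a stand-in for the real pipeline call under test:

    from diffusers.utils import logging
    from diffusers.utils.testing_utils import CaptureLogger

    # Get the logger for the module whose messages the test asserts on.
    logger = logging.get_logger("diffusers.pipelines.flux.lora_utils")
    logger.setLevel(logging.DEBUG)

    with CaptureLogger(logger) as cap_logger:
        # Stand-in for the pipeline/loading call exercised in the real tests.
        logger.debug("expanding LoRA shapes")

    # CaptureLogger collects everything the logger emitted inside the block.
    assert "expanding" in cap_logger.out

If the logger name does not match the module that emits the message, cap_logger.out stays empty and the assertion fails, which is exactly why this diff updates the name.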