File tree — 1 file changed: +4 −1 lines changed
Original file line number / diff line number / diff line change:

25  25   from huggingface_hub import model_info
26  26   from huggingface_hub.constants import HF_HUB_OFFLINE
27  27
28      -from ..hooks.group_offloading import _is_group_offload_enabled, _maybe_remove_and_reapply_group_offloading
29  28   from ..models.modeling_utils import ModelMixin, load_state_dict
30  29   from ..utils import (
31  30       USE_PEFT_BACKEND,
@@ -331,6 +330,8 @@ def _load_lora_into_text_encoder(
331 330      hotswap: bool = False,
332 331      metadata=None,
333 332  ):
    333 +    from ..hooks.group_offloading import _maybe_remove_and_reapply_group_offloading
    334 +
334 335      if not USE_PEFT_BACKEND:
335 336          raise ValueError("PEFT backend is required for this method.")
336 337
@@ -442,6 +443,8 @@ def _func_optionally_disable_offloading(_pipeline):
442 443          tuple:
443 444              A tuple indicating if `is_model_cpu_offload` or `is_sequential_cpu_offload` or `is_group_offload` is True.
444 445          """
    446 +    from ..hooks.group_offloading import _is_group_offload_enabled
    447 +
445 448      is_model_cpu_offload = False
446 449      is_sequential_cpu_offload = False
447 450      is_group_offload = False
You can’t perform that action at this time.
0 commit comments