 from peft.utils import INCLUDE_LINEAR_LAYERS_SHORTHAND, ModulesToSaveWrapper, infer_device
 from peft.utils.constants import DUMMY_MODEL_CONFIG, MIN_TARGET_MODULES_FOR_OPTIMIZATION

+from .testing_common import hub_online_once
 from .testing_utils import require_bitsandbytes, require_non_cpu


@@ -208,7 +209,8 @@ def test_module_matching_lora(self):
         # configs that could exist. This is okay as the method calls `check_target_module_exists` internally, which
         # has been extensively tested above.
         model_id = "hf-internal-testing/tiny-random-BloomForCausalLM"
-        model = AutoModel.from_pretrained(model_id)
+        with hub_online_once(model_id):
+            model = AutoModel.from_pretrained(model_id)
         # by default, this model matches query_key_value
         config = LoraConfig()
         peft_model = get_peft_model(model, config)
@@ -231,7 +233,8 @@ def test_module_matching_lora(self):

     def test_feedforward_matching_ia3(self):
         model_id = "hf-internal-testing/tiny-random-T5ForConditionalGeneration"
-        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
+        with hub_online_once(model_id):
+            model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
         # simple example for just one t5 block for testing
         config_kwargs = {
             "target_modules": ".*encoder.*block.0.*(SelfAttention|EncDecAttention|DenseReluDense).(k|q|v|wo|wi)$",
@@ -265,7 +268,8 @@ def test_feedforward_matching_ia3(self):
     def test_maybe_include_all_linear_layers_lora(
         self, model_id, model_type, initial_target_modules, expected_target_modules
     ):
-        model = self.transformers_class_map[model_type].from_pretrained(model_id)
+        with hub_online_once(model_id):
+            model = self.transformers_class_map[model_type].from_pretrained(model_id)
         config_cls = LoraConfig
         self._check_match_with_expected_target_modules(
             model_id, model, config_cls, initial_target_modules, expected_target_modules
@@ -281,7 +285,11 @@ def test_maybe_include_all_linear_layers_lora_bnb(
             config_kwargs = {"quantization_config": BitsAndBytesConfig(load_in_4bit=True)}
         elif quantization == "8bit":
             config_kwargs = {"quantization_config": BitsAndBytesConfig(load_in_8bit=True)}
-        model = self.transformers_class_map[model_type].from_pretrained(model_id, device_map="auto", **config_kwargs)
+
+        with hub_online_once(model_id):
+            model = self.transformers_class_map[model_type].from_pretrained(
+                model_id, device_map="auto", **config_kwargs
+            )
         config_cls = LoraConfig
         self._check_match_with_expected_target_modules(
             model_id, model, config_cls, initial_target_modules, expected_target_modules
@@ -310,7 +318,8 @@ def test_maybe_include_all_linear_layers_ia3_loha(self):
             INCLUDE_LINEAR_LAYERS_SHORTHAND,
             ["k_proj", "v_proj", "q_proj", "o_proj", "down_proj", "up_proj", "gate_proj"],
         )
-        model_ia3 = AutoModelForCausalLM.from_pretrained(model_id)
+        with hub_online_once(model_id):
+            model_ia3 = AutoModelForCausalLM.from_pretrained(model_id)
         model_loha = deepcopy(model_ia3)
         config_classes = [IA3Config, LoHaConfig]
         models = [model_ia3, model_loha]
@@ -322,7 +331,8 @@ def test_maybe_include_all_linear_layers_ia3_loha(self):
     @parameterized.expand(MAYBE_INCLUDE_ALL_LINEAR_LAYERS_TEST_INTERNALS)
     def test_maybe_include_all_linear_layers_internals(self, initial_target_modules, expected_target_modules):
         model_id = "HuggingFaceH4/tiny-random-LlamaForCausalLM"
-        model = AutoModelForCausalLM.from_pretrained(model_id)
+        with hub_online_once(model_id):
+            model = AutoModelForCausalLM.from_pretrained(model_id)
         config = LoraConfig(base_model_name_or_path=model_id, target_modules=initial_target_modules)
         new_config = _maybe_include_all_linear_layers(config, model)
         if isinstance(expected_target_modules, list):
@@ -333,7 +343,8 @@ def test_maybe_include_all_linear_layers_internals(self, initial_target_modules,

     def test_maybe_include_all_linear_layers_diffusion(self):
         model_id = "hf-internal-testing/tiny-sd-pipe"
-        model = StableDiffusionPipeline.from_pretrained(model_id)
+        with hub_online_once(model_id):
+            model = StableDiffusionPipeline.from_pretrained(model_id)
         config = LoraConfig(base_model_name_or_path=model_id, target_modules="all-linear")

         # all linear layers should be converted
@@ -347,7 +358,8 @@ def test_maybe_include_all_linear_does_not_target_classifier_head(self):
         # Ensure that if a SEQ_CLS model is being used with target_modules="all-linear", the classification head is not
         # targeted by the adapter layer.
         model_id = "HuggingFaceH4/tiny-random-LlamaForCausalLM"
-        model = AutoModelForSequenceClassification.from_pretrained(model_id, num_labels=10)
+        with hub_online_once(model_id):
+            model = AutoModelForSequenceClassification.from_pretrained(model_id, num_labels=10)
         # sanity check
         assert isinstance(model.score, nn.Linear)

@@ -372,7 +384,8 @@ def test_all_linear_nested_targets_correct_layers(
         # See 2390
         # Ensure that if adapter layers are already applied, we don't get nested adapter layers (e.g. LoRA targeting the
         # lora_A, lora_B layers)
-        model = self.transformers_class_map[model_type].from_pretrained(model_id)
+        with hub_online_once(model_id):
+            model = self.transformers_class_map[model_type].from_pretrained(model_id)
         config_cls = LoraConfig
         self._check_match_with_expected_target_modules(
             model_id, model, config_cls, initial_target_modules, expected_target_modules
@@ -386,7 +399,8 @@ def test_add_second_adapter_with_all_linear_works(self):
         # See 2390. Similar test to test_all_linear_nested_targets_correct_layers above, but using add_adapter instead of
         # calling get_peft_model in an already adapted model
         model_id = "HuggingFaceH4/tiny-random-LlamaForCausalLM"
-        model = AutoModelForCausalLM.from_pretrained(model_id)
+        with hub_online_once(model_id):
+            model = AutoModelForCausalLM.from_pretrained(model_id)

         # important: don't reuse the first config, since config.target_modules will be overwritten, which would make the
         # test pass trivially.
@@ -467,7 +481,9 @@ def test_ia3_targeted_module_list(self):
         assert model.targeted_module_names == ["lin0", "lin1"]

     def test_realistic_example(self):
-        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-BloomForCausalLM")
+        model_id = "hf-internal-testing/tiny-random-BloomForCausalLM"
+        with hub_online_once(model_id):
+            model = AutoModelForCausalLM.from_pretrained(model_id)
         config = LoraConfig(task_type="CAUSAL_LM")
         model = get_peft_model(model, config)
         expected = [
@@ -493,7 +509,9 @@ def test_two_targeted_parameters_list(self):
         assert model.targeted_parameter_names == ["lin0.weight", "lin1.weight"]

     def test_realistic_example(self):
-        model = AutoModelForCausalLM.from_pretrained("trl-internal-testing/tiny-random-LlamaForCausalLM")
+        model_id = "trl-internal-testing/tiny-random-LlamaForCausalLM"
+        with hub_online_once(model_id):
+            model = AutoModelForCausalLM.from_pretrained(model_id)
         config = LoraConfig(target_modules=[], task_type="CAUSAL_LM", target_parameters=["v_proj.weight"])
         model = get_peft_model(model, config)
         expected = [
@@ -564,7 +582,9 @@ def test_exclude_modules_not_used(self):
             get_peft_model(model, LoraConfig(target_modules=["lin1"], exclude_modules=["non_existent_module"]))

     def test_realistic_example(self):
-        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-BloomForCausalLM")
+        model_id = "hf-internal-testing/tiny-random-BloomForCausalLM"
+        with hub_online_once(model_id):
+            model = AutoModelForCausalLM.from_pretrained(model_id)
         config = LoraConfig(task_type="CAUSAL_LM", exclude_modules="transformer.h.2.self_attention.query_key_value")
         model = get_peft_model(model, config)
         expected = [
@@ -843,7 +863,8 @@ def test_base_model_type_large(self, large_model):
     def test_base_model_type_transformers_automodel(self):
         # ensure that this also works with transformers AutoModels
         model_id = "google/flan-t5-small"
-        model = AutoModel.from_pretrained(model_id)
+        with hub_online_once(model_id):
+            model = AutoModel.from_pretrained(model_id)
         model = get_peft_model(model, LoraConfig())
         model_status = model.get_model_status()
         assert model_status.base_model_type == "T5Model"
@@ -1144,7 +1165,8 @@ def test_transformers_model(self):
         model_id = "peft-internal-testing/gpt2-lora-random"
         # note that loading through AutoModelForCausalLM.from_pretrained does not enable training mode, hence
         # requires_grad=False
-        model = AutoModelForCausalLM.from_pretrained(model_id)
+        with hub_online_once(model_id):
+            model = AutoModelForCausalLM.from_pretrained(model_id)
         model_status = get_model_status(model)
         layer_status = get_layer_status(model)

@@ -1215,7 +1237,9 @@ def test_vanilla_model_raises(self):
             get_model_status(model)

     def test_transformer_model_without_adapter_raises(self):
-        model = AutoModelForCausalLM.from_pretrained("gpt2")
+        model_id = "gpt2"
+        with hub_online_once(model_id):
+            model = AutoModelForCausalLM.from_pretrained(model_id)
         # note: full error message is longer
         with pytest.raises(ValueError, match="No adapter layers found in the model"):
             get_layer_status(model)
@@ -1224,7 +1248,9 @@ def test_transformer_model_without_adapter_raises(self):
             get_model_status(model)

     def test_prefix_tuning(self):
-        model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-BartForConditionalGeneration")
+        model_id = "hf-internal-testing/tiny-random-BartForConditionalGeneration"
+        with hub_online_once(model_id):
+            model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
         config = PromptTuningConfig(task_type="SEQ_2_SEQ_LM", num_virtual_tokens=10)
         model = get_peft_model(model, config)

@@ -1236,7 +1262,9 @@ def test_prefix_tuning(self):
             model.get_model_status()

     def test_adaption_prompt(self):
-        model = AutoModelForCausalLM.from_pretrained("HuggingFaceH4/tiny-random-LlamaForCausalLM")
+        model_id = "HuggingFaceH4/tiny-random-LlamaForCausalLM"
+        with hub_online_once(model_id):
+            model = AutoModelForCausalLM.from_pretrained(model_id)
         config = AdaptionPromptConfig(adapter_layers=1, adapter_len=4)
         model = get_peft_model(model, config)

@@ -1322,8 +1350,10 @@ class TestBaseTunerWarnForTiedEmbeddings:
     )

     def _get_peft_model(self, tie_word_embeddings, target_module):
+        with hub_online_once(self.model_id):
+            base_model = AutoModelForCausalLM.from_pretrained(self.model_id, tie_word_embeddings=tie_word_embeddings)
         model = get_peft_model(
-            AutoModelForCausalLM.from_pretrained(self.model_id, tie_word_embeddings=tie_word_embeddings),
+            base_model,
             LoraConfig(target_modules=[target_module]),
         )
         return model
@@ -1454,7 +1484,8 @@ def test_get_peft_model_applies_find_target_modules(self):
         # target_modules is big enough. The resulting model itself should be unaffected.
         torch.manual_seed(0)
         model_id = "facebook/opt-125m"  # must be big enough for optimization to trigger
-        model = AutoModelForCausalLM.from_pretrained(model_id)
+        with hub_online_once(model_id):
+            model = AutoModelForCausalLM.from_pretrained(model_id)

         # base case: specify target_modules in a minimal fashion
         config = LoraConfig(init_lora_weights=False, target_modules=["q_proj", "v_proj"])
@@ -1475,7 +1506,8 @@ def test_get_peft_model_applies_find_target_modules(self):
         del model

         torch.manual_seed(0)
-        model = AutoModelForCausalLM.from_pretrained(model_id)
+        with hub_online_once(model_id):
+            model = AutoModelForCausalLM.from_pretrained(model_id)
         # pass the big target_modules to config
         config = LoraConfig(init_lora_weights=False, target_modules=big_target_modules)
         model = get_peft_model(model, config)
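
Every hunk in this diff applies the same pattern: the bare `from_pretrained` call is wrapped in the `hub_online_once` context manager imported from `.testing_common`, so a given Hub checkpoint only needs to be fetched online once per test session and later loads can fall back to the local cache. The helper's implementation is not part of this diff; the sketch below is only an illustration of what such a context manager could look like, assuming it works by tracking already-seen model ids and toggling the `HF_HUB_OFFLINE` environment variable. It is not the actual code in `testing_common.py`.

# Illustrative sketch only; the real hub_online_once may differ.
import os
from contextlib import contextmanager

_already_fetched = set()  # hypothetical registry of model ids downloaded once before


@contextmanager
def hub_online_once(model_id):
    if model_id not in _already_fetched:
        # first use of this id: allow normal Hub access, then remember it
        yield
        _already_fetched.add(model_id)
        return
    # later uses: force offline mode so the cached files are reused
    previous = os.environ.get("HF_HUB_OFFLINE")
    os.environ["HF_HUB_OFFLINE"] = "1"
    try:
        yield
    finally:
        if previous is None:
            os.environ.pop("HF_HUB_OFFLINE", None)
        else:
            os.environ["HF_HUB_OFFLINE"] = previous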