115115 ("Conv2d 2 LoRA" , "Conv2d" , LoraConfig , {"target_modules" : ["conv2d" , "lin0" ]}),
116116 ("Conv2d 1 LoRA with DoRA" , "Conv2d" , LoraConfig , {"target_modules" : ["conv2d" ], "use_dora" : True }),
117117 ("Conv2d 2 LoRA with DoRA" , "Conv2d" , LoraConfig , {"target_modules" : ["conv2d" , "lin0" ], "use_dora" : True }),
118+ ("Conv2d Groups LoRA" , "Conv2dGroups" , LoraConfig , {"target_modules" : ["conv2d" ]}),
119+ ("Conv2d Groups LoRA with DoRA" , "Conv2dGroups" , LoraConfig , {"target_modules" : ["conv2d" ], "use_dora" : True }),
118120 ("Conv3d 1 LoRA" , "Conv3d" , LoraConfig , {"target_modules" : ["conv3d" ]}),
119121 ("Conv3d 2 LoRA" , "Conv3d" , LoraConfig , {"target_modules" : ["conv3d" , "lin0" ]}),
120122 ("Conv3d 1 LoRA with DoRA" , "Conv3d" , LoraConfig , {"target_modules" : ["conv3d" ], "use_dora" : True }),
@@ -903,6 +905,25 @@ def forward(self, X):
         return X
 
 
+class ModelConv2DGroups(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv2d = nn.Conv2d(5, 5, 3, groups=5)
+        self.relu = nn.ReLU()
+        self.flat = nn.Flatten()
+        self.lin0 = nn.Linear(5, 2)
+        self.sm = nn.LogSoftmax(dim=-1)
+
+    def forward(self, X):
+        X = X.float().reshape(-1, 5, 3, 3)
+        X = self.conv2d(X)
+        X = self.relu(X)
+        X = self.flat(X)
+        X = self.lin0(X)
+        X = self.sm(X)
+        return X
+
+
 class ModelConv3D(nn.Module):
     def __init__(self):
         super().__init__()
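A side note, not part of the diff above: `ModelConv2DGroups` exercises a grouped convolution with `in_channels == out_channels == groups == 5`, i.e. a depthwise conv. Its weight has shape `(out_channels, in_channels // groups, kH, kW) = (5, 1, 3, 3)` rather than the dense `(5, 5, 3, 3)`, which is what makes this model a distinct test target. A minimal PyTorch sketch of the shapes involved:

```python
# Minimal sketch of the shapes in ModelConv2DGroups (illustration only, not part of the PR).
import torch
import torch.nn as nn

conv = nn.Conv2d(5, 5, 3, groups=5)         # depthwise: one 3x3 filter per channel
print(conv.weight.shape)                    # torch.Size([5, 1, 3, 3])
print(conv(torch.randn(2, 5, 3, 3)).shape)  # torch.Size([2, 5, 1, 1])
```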
@@ -967,6 +988,9 @@ def from_pretrained(cls, model_id, torch_dtype=None):
967988 if model_id == "Conv2d" :
968989 return ModelConv2D ().to (torch_dtype )
969990
991+ if model_id == "Conv2dGroups" :
992+ return ModelConv2DGroups ().to (torch_dtype )
993+
970994 if model_id == "Conv3d" :
971995 return ModelConv3D ().to (torch_dtype )
972996
@@ -1038,6 +1062,12 @@ def test_load_multiple_adapters(self, test_name, model_id, config_cls, config_kw
 
     @parameterized.expand(TEST_CASES)
     def test_merge_layers(self, test_name, model_id, config_cls, config_kwargs):
+        # https://github.com/huggingface/peft/pull/2403
+        if model_id in ["Conv2dGroups"]:
+            pytest.skip(
+                f"Skipping test for {model_id} as merging is not supported. (See https://github.com/huggingface/peft/pull/2403 for details)"
+            )
+
         config_kwargs = config_kwargs.copy()
         if issubclass(config_cls, LoraConfig):
             config_kwargs["init_lora_weights"] = False
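Why the merge tests skip `Conv2dGroups` (my own shape-level reading; the linked PR #2403 is the authoritative source): merging folds the low-rank update into the base weight, but a LoRA update for a convolution is naturally a dense `(out_channels, in_channels, kH, kW)` tensor, whereas a grouped conv only stores `in_channels // groups` input channels per filter, so the two tensors do not line up. A simplified, hypothetical illustration of that mismatch, not peft's actual merge code:

```python
# Hypothetical illustration of the shape mismatch (not peft's merge implementation).
import torch
import torch.nn as nn

base = nn.Conv2d(5, 5, 3, groups=5)        # grouped weight: (5, 1, 3, 3)
r = 2
lora_B = torch.randn(5, r)                 # "up" factor
lora_A = torch.randn(r, 5 * 3 * 3)         # "down" factor over all input channels
delta = (lora_B @ lora_A).reshape(5, 5, 3, 3)  # dense update: (5, 5, 3, 3)

print(delta.shape, base.weight.shape)      # torch.Size([5, 5, 3, 3]) torch.Size([5, 1, 3, 3])
# base.weight += delta would fail: the dense update has no direct slot in the grouped weight.
```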
@@ -1055,6 +1085,12 @@ def test_merge_layers(self, test_name, model_id, config_cls, config_kwargs):
 
     @parameterized.expand(TEST_CASES)
     def test_merge_layers_fp16(self, test_name, model_id, config_cls, config_kwargs):
+        # https://github.com/huggingface/peft/pull/2403
+        if model_id in ["Conv2dGroups"]:
+            pytest.skip(
+                f"Skipping test for {model_id} as merging is not supported. (See https://github.com/huggingface/peft/pull/2403 for details)"
+            )
+
         config_kwargs = config_kwargs.copy()
         if issubclass(config_cls, LoraConfig):
             config_kwargs["init_lora_weights"] = False
@@ -1064,6 +1100,12 @@ def test_merge_layers_fp16(self, test_name, model_id, config_cls, config_kwargs)
 
     @parameterized.expand(TEST_CASES)
     def test_merge_layers_is_idempotent(self, test_name, model_id, config_cls, config_kwargs):
+        # https://github.com/huggingface/peft/pull/2403
+        if model_id in ["Conv2dGroups"]:
+            pytest.skip(
+                f"Skipping test for {model_id} as merging is not supported. (See https://github.com/huggingface/peft/pull/2403 for details)"
+            )
+
         # calling merge twice with the same arguments should not change the output
         config_kwargs = config_kwargs.copy()
         if issubclass(config_cls, LoraConfig):
@@ -1074,6 +1116,12 @@ def test_merge_layers_is_idempotent(self, test_name, model_id, config_cls, confi
 
     @parameterized.expand(TEST_CASES)
     def test_safe_merge(self, test_name, model_id, config_cls, config_kwargs):
+        # https://github.com/huggingface/peft/pull/2403
+        if model_id in ["Conv2dGroups"]:
+            pytest.skip(
+                f"Skipping test for {model_id} as merging is not supported. (See https://github.com/huggingface/peft/pull/2403 for details)"
+            )
+
         # calling merge twice with the same arguments should not change the output
         config_kwargs = config_kwargs.copy()
         if issubclass(config_cls, LoraConfig):
@@ -1291,6 +1339,12 @@ def test_disable_adapters(self, test_name, model_id, config_cls, config_kwargs):
 
     @parameterized.expand(TEST_CASES)
     def test_disable_adapters_with_merging(self, test_name, model_id, config_cls, config_kwargs):
+        # https://github.com/huggingface/peft/pull/2403
+        if model_id in ["Conv2dGroups"]:
+            pytest.skip(
+                f"Skipping test for {model_id} as merging is not supported. (See https://github.com/huggingface/peft/pull/2403 for details)"
+            )
+
         # same as test_disable_adapters, but with merging
         X = self.prepare_inputs_for_testing()
         model = self.transformers_class.from_pretrained(model_id).to(self.torch_device)