
Commit 9813b17

HaohanTsao authored and yeonjoon-jung01 committed
TST Add test suite for GraLoRA.
1 parent bfa1ef7 commit 9813b17


5 files changed: +566 -7 lines changed


src/peft/tuners/gralora/layer.py

Lines changed: 4 additions & 5 deletions
@@ -224,8 +224,8 @@ def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = N
                 before merging the weights. This is useful if you want to check if the merge operation will produce
                 NaNs. Defaults to `False`.
             adapter_names (`list[str]`, *optional*):
-                The list of adapter names that should be merged. If None, all active adapters will be merged.
-                Defaults to `None`.
+                The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
+                to `None`.
         """
         from peft.tuners.tuners_utils import check_adapters_to_merge

@@ -274,9 +274,8 @@ def get_delta_weight(self, adapter) -> torch.Tensor:
         """
         Compute the delta weight for GraLoRA adapter.

-        GraLoRA applies block-wise low-rank adaptation with information exchange.
-        This method computes the equivalent weight matrix that would be added to
-        the base weight during merge.
+        GraLoRA applies block-wise low-rank adaptation with information exchange. This method computes the equivalent
+        weight matrix that would be added to the base weight during merge.

         Args:
             adapter (str): The name of the adapter

src/peft/tuners/gralora/model.py

Lines changed: 2 additions & 2 deletions
@@ -323,8 +323,8 @@ def merge_and_unload(
         self, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[list[str]] = None
     ):
         r"""
-        This method merges the Gralora layers into the base model. This is needed if someone wants to use the base model
-        as a standalone model.
+        This method merges the Gralora layers into the base model. This is needed if someone wants to use the base
+        model as a standalone model.

         Args:
             progressbar (`bool`):
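
For orientation, a minimal sketch of how the `merge_and_unload` API documented above is typically used. The base model, target modules, and output path are illustrative assumptions, not taken from this commit:

    # Sketch: merge trained GraLoRA layers into the base weights and drop the
    # PEFT wrapper. Model name and target modules are illustrative assumptions.
    from transformers import AutoModelForCausalLM

    from peft import GraloraConfig, get_peft_model

    base = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
    config = GraloraConfig(task_type="CAUSAL_LM", target_modules=["q_proj", "v_proj"])
    model = get_peft_model(base, config)

    # ... fine-tuning would happen here ...

    # safe_merge=True checks the merged weights for NaNs before committing the
    # merge; the result is a plain transformers model usable without PEFT.
    merged = model.merge_and_unload(progressbar=True, safe_merge=True)
    merged.save_pretrained("opt-125m-gralora-merged")  # hypothetical output path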

tests/test_config.py

Lines changed: 2 additions & 0 deletions
@@ -27,6 +27,7 @@
     BoneConfig,
     C3AConfig,
     FourierFTConfig,
+    GraloraConfig,
     HRAConfig,
     IA3Config,
     LNTuningConfig,
@@ -64,6 +65,7 @@
     (BoneConfig, {}),
     (C3AConfig, {}),
     (FourierFTConfig, {}),
+    (GraloraConfig, {}),
     (HRAConfig, {}),
     (IA3Config, {}),
     (LNTuningConfig, {}),

tests/test_decoder_models.py

Lines changed: 25 additions & 0 deletions
@@ -34,6 +34,7 @@
     CPTConfig,
     DeloraConfig,
     FourierFTConfig,
+    GraloraConfig,
     HRAConfig,
     IA3Config,
     LoraConfig,
@@ -137,6 +138,30 @@
             "target_modules": None,
         },
     ),
+    (
+        GraloraConfig,
+        {
+            "task_type": "CAUSAL_LM",
+            "r": 8,
+            "gralora_alpha": 16,
+            "target_modules": None,
+            "gralora_dropout": 0.05,
+            "gralora_k": 2,
+            "hybrid_r": 0,
+        },
+    ),
+    (
+        GraloraConfig,
+        {
+            "task_type": "CAUSAL_LM",
+            "r": 16,
+            "gralora_alpha": 32,
+            "target_modules": None,
+            "gralora_dropout": 0.05,
+            "gralora_k": 4,
+            "hybrid_r": 4,
+        },
+    ),
     (
         HRAConfig,
         {
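
The two parametrizations above exercise pure block-wise GraLoRA (hybrid_r=0) and the hybrid variant that reserves part of the rank for a vanilla low-rank term (hybrid_r=4). Below is a sketch of the hybrid configuration outside the test harness; the base model is an assumption, and the inline comments give our reading of the fields rather than documentation from this commit:

    # Sketch: build a GraLoRA-adapted causal LM using the hybrid test
    # configuration above. "facebook/opt-125m" is an illustrative assumption.
    from transformers import AutoModelForCausalLM

    from peft import GraloraConfig, get_peft_model

    config = GraloraConfig(
        task_type="CAUSAL_LM",
        r=16,                  # total adapter rank
        gralora_alpha=32,      # scaling factor, analogous to lora_alpha
        gralora_dropout=0.05,  # dropout on the adapter path
        gralora_k=4,           # block count for the block-wise decomposition
        hybrid_r=4,            # rank reserved for the vanilla low-rank component
        target_modules=None,   # None falls back to per-architecture defaults
    )

    base = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
    model = get_peft_model(base, config)
    model.print_trainable_parameters()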
