Skip to content

Commit 179848a

Browse files
Luka-D and willmj authored
fix: Update QuantLinear import to GPTQLoraLinear (#146)
fix: Update QuantLinear import to GPTQLoraLinear (#146)

* fix: Update QuantLinear import to GPTQLoraLinear

Renaming QuantLinear to GPTQLoraLinear to match the changes made in the peft library.

Signed-off-by: Luka Dojcinovic <[email protected]>

* fix: Specified peft version

Updated dependencies to include peft>=0.15

Signed-off-by: Luka Dojcinovic <[email protected]>

* fix: remove deps from plugins

Signed-off-by: Will Johnson <[email protected]>

---------

Signed-off-by: Luka Dojcinovic <[email protected]>
Signed-off-by: Will Johnson <[email protected]>
Co-authored-by: Will Johnson <[email protected]>
1 parent 1a804e4 commit 179848a

File tree

4 files changed

+6
-6
lines changed

4 files changed

+6
-6
lines changed

plugins/accelerated-peft/src/fms_acceleration_peft/autogptq_utils.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@
2626
ModelPatcherTrigger,
2727
)
2828
from peft import LoraConfig
29-
from peft.tuners.lora.gptq import QuantLinear as LoraLinearGPTQ
29+
from peft.tuners.lora.gptq import GPTQLoraLinear
3030
import torch
3131

3232
# these parameters are to be patched for triton v2
@@ -162,7 +162,7 @@ def create_new_module_peft(
162162
# to be installed
163163
new_module = None
164164
if isinstance(target, target_cls):
165-
new_module = LoraLinearGPTQ(
165+
new_module = GPTQLoraLinear(
166166
target, adapter_name, lora_config=lora_config, **kwargs
167167
)
168168

plugins/accelerated-peft/src/fms_acceleration_peft/gptqmodel/utils/peft.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@
3030
from peft.mapping import PEFT_TYPE_TO_CONFIG_MAPPING
3131
from peft.peft_model import PEFT_TYPE_TO_MODEL_MAPPING
3232
from peft.tuners.lora import LoraConfig, LoraModel
33-
from peft.tuners.lora.gptq import QuantLinear as LoraLinearGPTQ
33+
from peft.tuners.lora.gptq import GPTQLoraLinear
3434
import torch
3535

3636
# Local
@@ -68,7 +68,7 @@ def _create_new_module(
6868
# to be installed
6969
new_module = None
7070
if isinstance(target, target_cls):
71-
new_module = LoraLinearGPTQ(
71+
new_module = GPTQLoraLinear(
7272
target, adapter_name, lora_config=lora_config, **kwargs
7373
)
7474

plugins/framework/pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@ classifiers=[
2424
dependencies = [
2525
"numpy<2.0", # numpy needs to be bounded due to incompatiblity with current torch<2.3
2626
"torch>2.2",
27-
"peft<=0.14.0", # QuantLinear is not available for peft version > 0.14.0
27+
"peft>=0.15.0",
2828
"accelerate",
2929
"pandas",
3030
]

plugins/fused-ops-and-kernels/tests/test_fused_ops.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@
2828
if _is_package_available("auto_gptq"):
2929
# pylint: disable=ungrouped-imports
3030
# Third Party
31-
from peft.tuners.lora.gptq import QuantLinear as LoraGPTQLinear4bit
31+
from peft.tuners.lora.gptq import GPTQLoraLinear as LoraGPTQLinear4bit
3232

3333
LORA_QUANTIZED_CLASSES[GPTQ] = LoraGPTQLinear4bit
3434

0 commit comments

Comments (0)