From c68f49e6805b04c740e19aaf01b477705015f8d5 Mon Sep 17 00:00:00 2001
From: Emmanuel Ferdman
Date: Wed, 27 Aug 2025 00:29:42 -0700
Subject: [PATCH] Update lora layer source

Signed-off-by: Emmanuel Ferdman
---
 paddleformers/peft/lora/lora_quantization_layers.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/paddleformers/peft/lora/lora_quantization_layers.py b/paddleformers/peft/lora/lora_quantization_layers.py
index 0a595cdae04..2c2bbcf4f4f 100644
--- a/paddleformers/peft/lora/lora_quantization_layers.py
+++ b/paddleformers/peft/lora/lora_quantization_layers.py
@@ -90,7 +90,7 @@ class QuantizationLoRALinear(QuantizationLoRABaseLinear):
     """
     Quantization lora Linear layer.
     The code implementation refers to paddlenformers.peft.lora.lora_layers.LoRALinear.
-    https://github.com/PaddlePaddle/PaddleFormers/blob/develop/paddlenformers/peft/lora/lora_layers.py
+    https://github.com/PaddlePaddle/PaddleFormers/blob/develop/paddleformers/peft/lora/lora_layers.py
     Compare to LoRALinear, this class keeps weight in INT8/INT4 with quant scale, and supports
     weight_only_linear for input tensor and origin weight(LoRA part still uses fp16/bf16).
     """
@@ -128,7 +128,7 @@ class ColumnParallelQuantizationLoRALinear(QuantizationLoRABaseLinear):
     """
     Quantization lora Linear layer with mp parallelized(column).
     The code implementation refers to paddlenformers.peft.lora.lora_layers.ColumnParallelLoRALinear.
-    https://github.com/PaddlePaddle/PaddleFormers/blob/develop/paddlenformers/peft/lora/lora_layers.py#L203
+    https://github.com/PaddlePaddle/PaddleFormers/blob/develop/paddleformers/peft/lora/lora_layers.py#L203
     Compare to ColumnParallelLoRALinear, this class keeps weight in INT8/INT4 with quant scale, and supports
     weight_only_linear for input tensor and origin weight(LoRA part still uses fp16/bf16).
     """
@@ -205,7 +205,7 @@ class RowParallelQuantizationLoRALinear(QuantizationLoRABaseLinear):
     """
     Quantization lora Linear layer with mp parallelized(row).
     The code implementation refers to paddlenformers.peft.lora.lora_layers.RowParallelLoRALinear.
-    https://github.com/PaddlePaddle/PaddleFormers/blob/develop/paddlenformers/peft/lora/lora_layers.py#L99
+    https://github.com/PaddlePaddle/PaddleFormers/blob/develop/paddleformers/peft/lora/lora_layers.py#L99
     Compare to RowParallelLoRALinear, this class keeps weight in INT8/INT4 with quant scale, and supports
     weight_only_linear for input tensor and origin weight(LoRA part still uses fp16/bf16).
     """
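
Note for reviewers: the docstrings touched above describe layers that keep the base weight in INT8/INT4 with a quant scale while the LoRA branch stays in fp16/bf16. Below is a minimal, framework-agnostic sketch of that idea, not the PaddleFormers implementation; the names, shapes, and the per-channel int8 quantization scheme are all hypothetical and chosen only for illustration.

import numpy as np

rng = np.random.default_rng(0)
in_features, out_features, r, lora_alpha = 64, 128, 8, 16

# Base weight stored quantized: int8 values plus one float scale per output channel.
weight_fp = rng.standard_normal((in_features, out_features)).astype(np.float32)
scale = np.abs(weight_fp).max(axis=0) / 127.0
weight_int8 = np.clip(np.round(weight_fp / scale), -127, 127).astype(np.int8)

# LoRA factors kept in floating point (fp16/bf16 in the real layers; fp32 here).
lora_A = rng.standard_normal((in_features, r)).astype(np.float32) * 0.01
lora_B = np.zeros((r, out_features), dtype=np.float32)
scaling = lora_alpha / r

def forward(x):
    # Weight-only-style base matmul: dequantize the int8 weight with its scale,
    # then add the usual LoRA correction x @ A @ B * scaling.
    base = x @ (weight_int8.astype(np.float32) * scale)
    return base + (x @ lora_A @ lora_B) * scaling

x = rng.standard_normal((2, in_features)).astype(np.float32)
print(forward(x).shape)  # (2, 128)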