Skip to content

Commit b37af14

Browse files
committed
use lora->get_scale
1 parent e444b8e commit b37af14

File tree

1 file changed

+2
-3
lines changed

1 file changed

+2
-3
lines changed

src/llama.cpp

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2632,9 +2632,8 @@ static struct ggml_tensor * llm_build_lora_mm(
26322632
if (lora == nullptr) {
26332633
continue;
26342634
}
2635-
const float alpha = it.first->alpha;
2636-
const float rank = (float) lora->b->ne[0];
2637-
const float scale = alpha ? it.second * alpha / rank : it.second;
2635+
const float adapter_scale = it.second;
2636+
const float scale = lora->get_scale(it.first->alpha, adapter_scale);
26382637
struct ggml_tensor * ab_cur = ggml_mul_mat(
26392638
ctx0, lora->b,
26402639
ggml_mul_mat(ctx0, lora->a, cur)

Comments (0)