Commit 70dfe32

Lora: fix rank 1 loras
1 parent 4d04c94

1 file changed: +5 -5 lines changed

lora.hpp

Lines changed: 5 additions & 5 deletions
@@ -615,12 +615,12 @@ struct LoraModel : public GGMLRunner {
         scale_value *= multiplier;
 
         // flat lora tensors to multiply it
-        int64_t lora_up_rows = lora_up->ne[ggml_n_dims(lora_up) - 1];
-        lora_up = ggml_reshape_2d(compute_ctx, lora_up, ggml_nelements(lora_up) / lora_up_rows, lora_up_rows);
-        auto lora_down_n_dims = ggml_n_dims(lora_down);
-        // assume n_dims should always be a multiple of 2 (otherwise rank 1 doesn't work)
+        int64_t lora_up_rows = lora_up->ne[ggml_n_dims(lora_up) - 1];
+        lora_up = ggml_reshape_2d(compute_ctx, lora_up, ggml_nelements(lora_up) / lora_up_rows, lora_up_rows);
+        auto lora_down_n_dims = ggml_n_dims(lora_down);
+        //assume n_dims should always be a multiple of 2 (otherwise rank 1 doesn't work)
         lora_down_n_dims = (lora_down_n_dims + lora_down_n_dims % 2);
-        int64_t lora_down_rows = lora_down->ne[lora_down_n_dims - 1];
+        int64_t lora_down_rows = lora_down->ne[lora_down_n_dims-1];
         lora_down = ggml_reshape_2d(compute_ctx, lora_down, ggml_nelements(lora_down) / lora_down_rows, lora_down_rows);
 
         // ggml_mul_mat requires tensor b transposed
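
Why the even-rounding in this hunk matters for rank-1 LoRAs: ggml_n_dims() ignores trailing axes of size 1, so when the rank axis of lora_down has size 1 it reports an odd dimension count, and ne[n_dims - 1] then indexes the input axis rather than the rank axis, giving the reshape the wrong row count. The following is a minimal standalone sketch of that failure mode, not part of the commit; the 320-wide linear layer is a hypothetical example, and it assumes ggml.h is available.

#include "ggml.h"
#include <cstdio>

int main() {
    struct ggml_init_params params = {
        /*.mem_size   =*/ 16 * 1024 * 1024,
        /*.mem_buffer =*/ nullptr,
        /*.no_alloc   =*/ false,
    };
    struct ggml_context* ctx = ggml_init(params);

    // Hypothetical rank-1 lora_down for a 320-wide linear layer: ne = {320, 1}.
    struct ggml_tensor* lora_down = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 320, 1);

    // ggml_n_dims() drops trailing size-1 axes, so it returns 1 here, not 2.
    auto n_dims = ggml_n_dims(lora_down);
    printf("ggml_n_dims = %d\n", n_dims);  // prints 1

    // Without the rounding, ne[n_dims - 1] reads ne[0] = 320, and the reshape
    // below would produce ne = {1, 320} instead of ne = {320, 1}.
    // Rounding an odd count up to the next even one lands on the rank axis,
    // whose size is 1 for a rank-1 LoRA.
    n_dims = n_dims + n_dims % 2;                         // 1 -> 2
    int64_t lora_down_rows = lora_down->ne[n_dims - 1];   // 1, not 320
    printf("rows = %lld\n", (long long)lora_down_rows);

    lora_down = ggml_reshape_2d(ctx, lora_down,
                                ggml_nelements(lora_down) / lora_down_rows,
                                lora_down_rows);
    ggml_free(ctx);
    return 0;
}

For rank 2 and above, the usual 2-D (linear) and 4-D (conv) down tensors already report an even dimension count, so n_dims % 2 is 0 and the rounding is a no-op, consistent with the comment's assumption that n_dims is always a multiple of 2.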
