
Commit 4f2ad11

Fix DeciLM (#2883)
1 parent d7afab6 commit 4f2ad11


vllm/model_executor/models/decilm.py

Lines changed: 5 additions & 1 deletion
@@ -28,6 +28,7 @@
 import torch
 from transformers import PretrainedConfig

+from vllm.config import LoRAConfig
 from vllm.model_executor.layers.linear import LinearMethodBase
 from vllm.model_executor.models.llama import LlamaForCausalLM
 from vllm.model_executor.weight_utils import (default_weight_loader,
@@ -56,10 +57,13 @@ def __init__(
         self,
         config: Optional[PretrainedConfig] = None,
         linear_method: Optional[LinearMethodBase] = None,
+        lora_config: Optional[LoRAConfig] = None,
     ) -> None:
         config.num_key_value_heads = max(config.num_key_value_heads_per_layer)
         delattr(config, "num_key_value_heads_per_layer")
-        super().__init__(config=config, linear_method=linear_method)
+        super().__init__(config=config,
+                         linear_method=linear_method,
+                         lora_config=lora_config)

     def load_weights(self,
                      model_name_or_path: str,
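
For readers skimming the diff, the constructor after this commit reads roughly as follows. This is a sketch reconstructed from the hunks above, not an excerpt of the full file; the rest of DeciLMForCausalLM and its imports are omitted.

class DeciLMForCausalLM(LlamaForCausalLM):

    def __init__(
        self,
        config: Optional[PretrainedConfig] = None,
        linear_method: Optional[LinearMethodBase] = None,
        lora_config: Optional[LoRAConfig] = None,  # newly accepted parameter
    ) -> None:
        # DeciLM configs carry per-layer KV head counts; collapse them to a
        # single value so the Llama implementation can be reused as-is.
        config.num_key_value_heads = max(config.num_key_value_heads_per_layer)
        delattr(config, "num_key_value_heads_per_layer")
        # Forward lora_config to LlamaForCausalLM. Before this commit the
        # parameter did not exist here, so passing a LoRA config to the
        # DeciLM constructor would fail with a TypeError.
        super().__init__(config=config,
                         linear_method=linear_method,
                         lora_config=lora_config)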
