Skip to content

Commit a607d38

Browse files
committed
gemma3n lora
Signed-off-by: NickLucche <[email protected]>
1 parent 3c187d4 commit a607d38

File tree

1 file changed: +5 additions, -10 deletions

vllm/model_executor/models/gemma3n.py

Lines changed: 5 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -20,8 +20,8 @@
 import torch
 from torch import nn
-from transformers.models.gemma3n.configuration_gemma3n import Gemma3nTextConfig
 
+from transformers.models.gemma3n.configuration_gemma3n import Gemma3nTextConfig
 from vllm.attention import Attention
 from vllm.compilation.decorators import support_torch_compile
 from vllm.config import CacheConfig, VllmConfig
@@ -52,14 +52,9 @@
 from vllm.sequence import IntermediateTensors
 from vllm.v1.attention.backends.utils import KVSharingFastPrefillMetadata
 
-from .interfaces import SupportsQuant
-from .utils import (
-    AutoWeightsLoader,
-    extract_layer_index,
-    is_pp_missing_parameter,
-    make_layers,
-    maybe_prefix,
-)
+from .interfaces import SupportsLoRA, SupportsQuant
+from .utils import (AutoWeightsLoader, extract_layer_index,
+                    is_pp_missing_parameter, make_layers, maybe_prefix)
 
 logger = init_logger(__name__)

@@ -1081,7 +1076,7 @@ def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
         return loaded_params
 
 
-class Gemma3nForCausalLM(nn.Module):
+class Gemma3nForCausalLM(nn.Module, SupportsLoRA):
     packed_modules_mapping = {
         "qkv_proj": [
             "q_proj",

Comments (0)