Commit bc27552

gemma3n lora
Signed-off-by: NickLucche <[email protected]>
1 parent 8696082 commit bc27552

File tree: 1 file changed, +3 −3 lines

vllm/model_executor/models/gemma3n.py

Lines changed: 3 additions & 3 deletions
@@ -20,8 +20,8 @@
 
 import torch
 from torch import nn
-from transformers.models.gemma3n.configuration_gemma3n import Gemma3nTextConfig
 
+from transformers.models.gemma3n.configuration_gemma3n import Gemma3nTextConfig
 from vllm.attention import Attention
 from vllm.compilation.decorators import support_torch_compile
 from vllm.config import CacheConfig, VllmConfig
@@ -47,7 +47,7 @@
 from vllm.sequence import IntermediateTensors
 from vllm.v1.attention.backends.utils import KVSharingFastPrefillMetadata
 
-from .interfaces import SupportsQuant
+from .interfaces import SupportsLoRA, SupportsQuant
 from .utils import (AutoWeightsLoader, extract_layer_index,
                     is_pp_missing_parameter, make_layers, maybe_prefix)
 
@@ -1047,7 +1047,7 @@ def load_weights(self, weights: Iterable[tuple[str,
         return loaded_params
 
 
-class Gemma3nForCausalLM(nn.Module):
+class Gemma3nForCausalLM(nn.Module, SupportsLoRA):
     packed_modules_mapping = {
         "qkv_proj": [
             "q_proj",
