Skip to content

Commit dec277b

Browse files
committed
gemma3n: enable LoRA support (import SupportsLoRA and add it to Gemma3nForCausalLM's bases)
Signed-off-by: NickLucche <[email protected]>
1 parent d1a55c6 commit dec277b

File tree

1 file changed

+3
-3
lines changed

1 file changed

+3
-3
lines changed

vllm/model_executor/models/gemma3n.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -20,8 +20,8 @@
2020

2121
import torch
2222
from torch import nn
23-
from transformers.models.gemma3n.configuration_gemma3n import Gemma3nTextConfig
2423

24+
from transformers.models.gemma3n.configuration_gemma3n import Gemma3nTextConfig
2525
from vllm.attention import Attention
2626
from vllm.compilation.decorators import support_torch_compile
2727
from vllm.config import CacheConfig, VllmConfig
@@ -46,7 +46,7 @@
4646
from vllm.model_executor.sampling_metadata import SamplingMetadata
4747
from vllm.sequence import IntermediateTensors
4848

49-
from .interfaces import SupportsQuant
49+
from .interfaces import SupportsLoRA, SupportsQuant
5050
from .utils import (AutoWeightsLoader, extract_layer_index,
5151
is_pp_missing_parameter, make_layers, maybe_prefix)
5252

@@ -762,7 +762,7 @@ def load_weights(self, weights: Iterable[tuple[str,
762762
return loaded_params
763763

764764

765-
class Gemma3nForCausalLM(nn.Module):
765+
class Gemma3nForCausalLM(nn.Module, SupportsLoRA):
766766
packed_modules_mapping = {
767767
"qkv_proj": [
768768
"q_proj",

0 commit comments

Comments (0)