|
@@ -7,6 +7,7 @@
 import torch
 
 from torch import nn
+
 from transformers import AutoModel, BatchFeature
 from transformers.models.gemma3n import (
     Gemma3nAudioConfig,
@@ -54,14 +55,10 @@
 from vllm.sequence import IntermediateTensors
 from vllm.utils.tensor_schema import TensorSchema, TensorShape
 
-from .interfaces import MultiModalEmbeddings, SupportsMultiModal, SupportsTranscription
-from .utils import (
-    AutoWeightsLoader,
-    WeightsMapper,
-    flatten_bn,
-    init_vllm_registered_model,
-    maybe_prefix,
-)
+from .interfaces import (MultiModalEmbeddings, SupportsLoRA, SupportsMultiModal,
+                         SupportsTranscription)
+from .utils import (AutoWeightsLoader, WeightsMapper, flatten_bn,
+                    init_vllm_registered_model, maybe_prefix)
 
 logger = init_logger(__name__)
 
@@ -456,14 +453,11 @@ def forward(
         return self.embedding_post_projection_norm(emb_norm_proj)
 
 
-@MULTIMODAL_REGISTRY.register_processor(
-    Gemma3nMultiModalProcessor,
-    info=Gemma3nProcessingInfo,
-    dummy_inputs=Gemma3nDummyInputsBuilder,
-)
-class Gemma3nForConditionalGeneration(
-    nn.Module, SupportsMultiModal, SupportsTranscription
-):
+@MULTIMODAL_REGISTRY.register_processor(Gemma3nMultiModalProcessor,
+                                        info=Gemma3nProcessingInfo,
+                                        dummy_inputs=Gemma3nDummyInputsBuilder)
+class Gemma3nForConditionalGeneration(nn.Module, SupportsMultiModal,
+                                      SupportsTranscription, SupportsLoRA):
     merge_by_field_config = True
     supported_languages = ISO639_1_SUPPORTED_LANGS
 
|
|
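The substantive change above is the new SupportsLoRA base on Gemma3nForConditionalGeneration; the remaining lines are import and call-site reformatting. For context, here is a minimal sketch of how a vLLM model class typically advertises LoRA support elsewhere in the codebase. It assumes vLLM is installed and that SupportsLoRA is importable from vllm.model_executor.models.interfaces; the packed_modules_mapping attribute, its contents, and the example class name are assumptions drawn from other vLLM model implementations, not from this commit.

# Hedged sketch (not part of this commit): how a vLLM model class commonly
# opts into LoRA support. Requires vLLM to be installed.
from torch import nn

from vllm.model_executor.models.interfaces import SupportsLoRA


class ExampleForConditionalGeneration(nn.Module, SupportsLoRA):
    # Assumed attribute, following the pattern used by other vLLM models:
    # maps fused projection weights to the per-projection names that LoRA
    # adapters target. The concrete mapping depends on the architecture.
    packed_modules_mapping = {
        "qkv_proj": ["q_proj", "k_proj", "v_proj"],
        "gate_up_proj": ["gate_proj", "up_proj"],
    }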