@@ -2,7 +2,7 @@
 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project

 import os
-from typing import Optional, Union
+from typing import TYPE_CHECKING, Optional, Union

 import huggingface_hub
 import regex as re
@@ -31,10 +31,14 @@
                               RowParallelLinearWithLoRA,
                               VocabParallelEmbeddingWithLoRA)
 from vllm.model_executor.layers.linear import LinearBase
+
 # yapf: enable
-from vllm.model_executor.layers.logits_processor import LogitsProcessor
-from vllm.model_executor.layers.vocab_parallel_embedding import ParallelLMHead
-from vllm.model_executor.models.utils import WeightsMapper
+
+if TYPE_CHECKING:
+    from vllm.model_executor.layers.logits_processor import LogitsProcessor
+    from vllm.model_executor.layers.vocab_parallel_embedding import (
+        ParallelLMHead)
+    from vllm.model_executor.models.utils import WeightsMapper

 logger = init_logger(__name__)

@@ -75,8 +79,8 @@ def from_layer(layer: nn.Module,


 def from_layer_logits_processor(
-    layer: LogitsProcessor,
-    lm_head: ParallelLMHead,
+    layer: "LogitsProcessor",
+    lm_head: "ParallelLMHead",
     max_loras: int,
     lora_config: LoRAConfig,
     model_config: Optional[PretrainedConfig] = None,
@@ -98,8 +102,8 @@ def replace_submodule(model: nn.Module, module_name: str,


 def parse_fine_tuned_lora_name(
-        name: str,
-        weights_mapper: Optional[WeightsMapper] = None
+        name: str,
+        weights_mapper: Optional["WeightsMapper"] = None
 ) -> tuple[str, bool, bool]:
     """Parse the name of lora weights.
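Context for the change above, not part of the diff itself: it applies the typing.TYPE_CHECKING pattern. Imports needed only for type annotations move under an if TYPE_CHECKING: guard, so the interpreter skips them at runtime (cutting import cost and circular-import risk), and the annotations that used those names become quoted forward references that type checkers still resolve. Below is a minimal, self-contained sketch of the pattern; heavy_module and HeavyType are hypothetical placeholders standing in for the vLLM modules.

from typing import TYPE_CHECKING, Optional

if TYPE_CHECKING:
    # Evaluated only by static type checkers (TYPE_CHECKING is False at
    # runtime), so this hypothetical heavy module is never imported when
    # the program actually runs.
    from heavy_module import HeavyType


def configure(obj: "HeavyType", retries: Optional[int] = None) -> None:
    # The quoted annotation is a forward reference: it is not evaluated at
    # runtime, so HeavyType does not have to be importable here.
    print(type(obj).__name__, retries)

Calling configure(object(), 3) works even though heavy_module does not exist at runtime, which is why the annotations in the diff could safely become strings once the imports moved under the guard.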