@@ -3,6 +3,7 @@
 from lightllm.models.internlm2.model import Internlm2TpPartModel
 from lightllm.models.llama.model import LlamaTpPartModel
 from lightllm.models.phi3.model import Phi3TpPartModel
+from lightllm.models.qwen2.model import Qwen2TpPartModel
 from lightllm.models.qwen_vl.layer_infer.pre_layer_infer import LlamaMultimodalPreLayerInfer
 from lightllm.server.multimodal_params import MultimodalParams, ImageItem
 from lightllm.common.build_utils import repair_config
@@ -145,3 +146,26 @@ def _init_config(self):
         if self.finetune_config:
             self.config["vocab_size"] = self.finetune_config.vocab_size
         return
+
+
+class InternVLQwen2TpPartModel(Qwen2TpPartModel):
+    # weight class
+    pre_and_post_weight_class = InternVLLlamaPreAndPostLayerWeight
+
+    # infer class
+    pre_layer_infer_class = LlamaMultimodalPreLayerInfer
+
+    def __init__(self, kvargs):
+        super().__init__(kvargs)
+        return
+
+    def _init_config(self):
+        with open(os.path.join(self.weight_dir_, "config.json"), "r") as json_file:
+            self.config = json.load(json_file)["llm_config"]
+        # rename keys
+        repair_config(self.config, same_names=["num_attention_heads", "n_head"])
+        repair_config(self.config, same_names=["hidden_size", "n_embd", "n_embed"])
+        repair_config(self.config, same_names=["num_hidden_layers", "n_layer"])
+        if self.finetune_config:
+            self.config["vocab_size"] = self.finetune_config.vocab_size
+        return
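For context, the new `InternVLQwen2TpPartModel._init_config` expects an InternVL-style composite `config.json` whose language-model settings sit nested under the `llm_config` key, and it then canonicalizes alternate key spellings via `repair_config`. Below is a minimal standalone sketch of that pattern; the inline `repair` helper is an assumed re-implementation inferred from how `repair_config` is called here, not the library's actual code, and the file layout is illustrative.

```python
import json
import os


def load_llm_config(weight_dir: str) -> dict:
    # InternVL checkpoints bundle vision and language configs in a single
    # config.json; the LLM portion is nested under the "llm_config" key.
    with open(os.path.join(weight_dir, "config.json"), "r") as f:
        composite = json.load(f)
    llm_config = composite["llm_config"]

    # Assumed equivalent of repair_config: find the first alias that is
    # present and copy its value to every alias, so downstream code can
    # read the config under any of the spellings.
    def repair(config: dict, same_names: list) -> None:
        for name in same_names:
            if name in config:
                value = config[name]
                for alias in same_names:
                    config[alias] = value
                return

    repair(llm_config, ["num_attention_heads", "n_head"])
    repair(llm_config, ["hidden_size", "n_embd", "n_embed"])
    repair(llm_config, ["num_hidden_layers", "n_layer"])
    return llm_config
```

This mirrors the existing InternVL-Llama wiring one class above: only the base class changes to `Qwen2TpPartModel`, while the multimodal pre-layer infer class and pre/post weight class are reused unchanged.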