Skip to content

Commit c63cae9

Browse files
author
sangchengmeng
committed
add-qwen3-vl
1 parent 8f97e99 commit c63cae9

File tree

5 files changed

+3
-7
lines changed

5 files changed

+3
-7
lines changed

lightllm/models/llama/model.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -206,6 +206,7 @@ def _init_to_get_rotary(self, default_base=10000):
206206
/ rope_scaling_factor
207207
)
208208
freqs = torch.outer(t, inv_freq)
209+
209210
self._cos_cached = torch.cos(freqs).to(self.data_type).cuda()
210211
self._sin_cached = torch.sin(freqs).to(self.data_type).cuda()
211212
return

lightllm/models/qwen2_vl/model.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,4 @@
11
import json
2-
from operator import mul
32
import numpy as np
43
from lightllm.common.basemodel.multimodal_tokenizer import BaseMultiModalTokenizer
54
from lightllm.models.qwen_vl.layer_infer.pre_layer_infer import LlamaMultimodalPreLayerInfer

lightllm/models/qwen2_vl/vision_process.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -275,4 +275,5 @@ def _preprocess_bydevice(self, image, device="cuda") -> Tuple[torch.Tensor, torc
275275

276276
pixel_values = torch.cat(processed_images, dim=0)
277277
image_grid_thw = torch.as_tensor(processed_grids)
278+
278279
return pixel_values, image_grid_thw

lightllm/server/embed_cache/utils.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,6 @@
11
import torch
22
import numpy as np
33
from io import BytesIO
4-
from typing import List, Optional
54
import multiprocessing.shared_memory as shm
65

76

lightllm/server/visualserver/model_infer/model_rpc.py

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -19,11 +19,7 @@
1919
from lightllm.models.qwen2_5_vl.qwen2_5_visual import Qwen2_5_VisionTransformerPretrainedModel
2020
from lightllm.models.qwen3_vl.qwen3_visual import Qwen3VisionTransformerPretrainedModel
2121
from lightllm.models.tarsier2.tarsier2_visual import TarsierVisionTransformerPretrainedModel
22-
from lightllm.server.embed_cache.utils import (
23-
tensor2bytes,
24-
create_shm,
25-
get_shm_name_embed,
26-
)
22+
from lightllm.server.embed_cache.utils import tensor2bytes, read_shm, create_shm, get_shm_name_data, get_shm_name_embed
2723
from lightllm.utils.infer_utils import set_random_seed
2824
from lightllm.utils.infer_utils import calculate_time, mark_start, mark_end
2925
from lightllm.utils.dist_utils import init_vision_distributed_env

0 commit comments

Comments (0)