
Commit 59ec94b

fix
1 parent 797fc55 commit 59ec94b

File tree

.gitignore
lightllm/server/httpserver/manager.py
lightllm/server/tokenizer.py
lightllm/server/visualserver/manager.py

4 files changed (0 additions, 11 deletions)


.gitignore

Lines changed: 0 additions & 1 deletion
@@ -6,4 +6,3 @@ dist
 .idea
 .vscode
 tmp/
-ref/

lightllm/server/httpserver/manager.py

Lines changed: 0 additions & 8 deletions
@@ -272,18 +272,12 @@ async def generate(
         original_multimodal_params = copy.deepcopy(multimodal_params)
 
         if self.pd_mode.is_P_or_NORMAL():
-            print(f"[debug] generate verify_and_preload: {multimodal_params.to_dict()}")
             await multimodal_params.verify_and_preload(request)
 
         # Log information about the arriving request
         await self._log_req_header(request_headers, group_request_id)
         # Monitoring
 
-        print(
-            f"[debug] generate request: {prompt}, \
-            sampling_params: {sampling_params.to_dict()}, \
-            multimodal_params: {multimodal_params.to_dict()}"
-        )
         # Assign img ids
         prompt_ids = await self._encode(prompt, multimodal_params, sampling_params)
         prompt_tokens = len(prompt_ids)
@@ -402,8 +396,6 @@ async def _encode(
         if multimodal_params.audios:
             assert self.args.enable_multimodal_audio, "audio multimodal not enabled"
         await self._alloc_multimodal_resources(multimodal_params, sampling_params)
-        print(f"[debug] _encode: {prompt}, multimodal_params: {multimodal_params.to_dict()}")
-        print(f"[debug] model_name: {self.args.model_name}, model_path: {self.args.model_dir}")
         prompt_ids = self.tokenizer.encode(
             prompt, multimodal_params, add_special_tokens=sampling_params.add_special_tokens
         )

lightllm/server/tokenizer.py

Lines changed: 0 additions & 1 deletion
@@ -87,7 +87,6 @@ def get_tokenizer(
     elif model_type == "qwen" and "visual" in model_cfg:
         tokenizer = QWenVLTokenizer(tokenizer, model_cfg)
     elif model_type == "mineru2_qwen":
-        print(f"[debug] use mineru2_qwen tokenizer, model_cfg: {model_cfg}")
         tokenizer = Mineru2QwenTokenizer(tokenizer, model_cfg)
     elif model_type in ["qwen2_vl", "qwen2_5_vl"] and "vision_config" in model_cfg:
         from transformers import AutoProcessor

lightllm/server/visualserver/manager.py

Lines changed: 0 additions & 1 deletion
@@ -110,7 +110,6 @@ async def loop_for_fwd(self):
         processing_group_reqs = []
         images_need_infer = []
         while len(self.waiting_reqs) > 0:
-            print(f"[debug] loop_for_fwd waiting_reqs={len(self.waiting_reqs)}")
             group_req_indexes = self.waiting_reqs.pop(0)
             shm_req = self.shm_req_manager.get_req_obj_by_index(group_req_indexes.shm_req_indexes[0])
             is_aborted = shm_req.is_aborted
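
A note on the pattern being removed (not part of the commit itself): every deletion above strips an ad-hoc print(f"[debug] ...") trace. If the same diagnostics are wanted later, Python's standard logging module can keep them in the tree behind a log level instead of always writing to stdout. The helper name log_generate_request and the logger name below are illustrative assumptions, not code from this repository.

import logging

# Hypothetical module-level logger; the name is an assumption.
logger = logging.getLogger("lightllm.server.httpserver")

def log_generate_request(prompt, sampling_params, multimodal_params):
    # isEnabledFor() guards the potentially expensive to_dict() calls,
    # so they only run when DEBUG logging is actually switched on.
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug(
            "generate request: %s, sampling_params: %s, multimodal_params: %s",
            prompt,
            sampling_params.to_dict(),
            multimodal_params.to_dict(),
        )

# Usage: enable the traces only while debugging.
logging.basicConfig(level=logging.DEBUG)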
