Skip to content

Commit 9f64b1c

Browse files
fix: fix run-vllm.py to support vLLM 0.11.0 (#95)
1 parent c3bd1cb commit 9f64b1c

File tree

1 file changed

+5
-1
lines changed

1 file changed

+5
-1
lines changed

run-vllm.py

Lines changed: 5 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -6,6 +6,7 @@
66

77
import torch
88
import transformers
9+
import vllm
910
from transformers import AutoConfig
1011
from vllm import LLM, SamplingParams
1112

@@ -110,7 +111,10 @@ def __call__(
110111
raise ValueError(f"Unsupported mode: {mode}")
111112

112113
outputs = llm.generate(
113-
prompt_token_ids=[task["prompt_token_ids"] for task in tasks],
114+
prompts=[
115+
vllm.inputs.TokensPrompt(prompt_token_ids=task["prompt_token_ids"])
116+
for task in tasks
117+
],
114118
sampling_params=sampling_params,
115119
)
116120
for output in outputs:

0 commit comments

Comments (0)