We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
1 parent b8fa125 · commit 082c4a6 — Copy full SHA for 082c4a6
vllm_omni/entrypoints/openai/serving_chat.py
@@ -13,7 +13,6 @@
13
from fastapi import Request
14
from PIL import Image
15
from pydantic import TypeAdapter
16
-from vllm.renderers.protocol import BaseRenderer
17
18
from vllm_omni.entrypoints.async_omni import AsyncOmni
19
from vllm_omni.entrypoints.openai.protocol.chat_completion import OmniChatCompletionResponse
@@ -67,7 +66,7 @@
67
66
from vllm.logger import init_logger
68
from vllm.outputs import RequestOutput
69
from vllm.reasoning import ReasoningParser
70
-from vllm.renderers import merge_kwargs
+from vllm.renderers import BaseRenderer, merge_kwargs
71
from vllm.renderers.inputs import TokPrompt
72
from vllm.sampling_params import SamplingParams
73
from vllm.tokenizers import TokenizerLike
0 commit comments