1 parent 205b949 commit ad6eca4
vllm/engine/llm_engine.py
@@ -13,7 +13,6 @@
 from vllm.executor.executor_base import ExecutorBase
 from vllm.logger import init_logger
 from vllm.lora.request import LoRARequest
-from vllm.model_executor.model_loader import get_architecture_class_name
 from vllm.outputs import RequestOutput
 from vllm.sampling_params import SamplingParams
 from vllm.sequence import (MultiModalData, SamplerOutput, Sequence,
@@ -115,6 +114,8 @@ def __init__(
 
         # If usage stat is enabled, collect relevant info.
         if is_usage_stats_enabled():
+            from vllm.model_executor.model_loader import (
+                get_architecture_class_name)
             usage_message.report_usage(
                 get_architecture_class_name(model_config),
                 usage_context,
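The commit drops the top-level import of get_architecture_class_name and defers it into the is_usage_stats_enabled() branch, so vllm.model_executor.model_loader is only imported when usage stats are actually reported. Below is a minimal, self-contained sketch of the same deferred-import pattern; the function name and the stdlib platform module are illustrative stand-ins, not vLLM code.

# Sketch of a deferred (lazy) import, as in the diff above.
# describe_platform and the platform module are stand-ins for
# the vLLM names; they are not part of the vLLM API.

def describe_platform(enabled: bool) -> str:
    """Return platform info, importing the helper module only when needed."""
    if not enabled:
        return "disabled"
    # Deferred import: the module is loaded the first time this branch runs,
    # not when this file is imported, which keeps module import cheap and can
    # break import cycles.
    import platform
    return platform.platform()

if __name__ == "__main__":
    print(describe_platform(True))

The trade-off is that the import cost is paid on the first call that takes the branch instead of at module load, which is usually acceptable for a rarely taken path like usage reporting.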