
Commit a26bcb0

njhill authored and epwalsh committed
[BugFix] Don't change title of top-level process (vllm-project#22032)
Signed-off-by: Nick Hill <[email protected]>
1 parent 6c3c358 commit a26bcb0

File tree

2 files changed (+8, -7 lines)
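
The change follows the usual multiprocessing pattern: the top-level launcher keeps its original command line in ps/top output, and each spawned worker renames only itself. Below is a minimal sketch of that pattern, not vLLM's actual code, assuming the third-party setproctitle package (which a helper like vllm.utils.set_process_title is plausibly built on):

import multiprocessing

import setproctitle


def api_server_worker(server_index: int) -> None:
    # Runs in the child: retitling here affects only this PID, so the
    # parent keeps its original "python ..." title in ps/top output.
    setproctitle.setproctitle(f"APIServer_{server_index}")


if __name__ == "__main__":
    workers = [
        multiprocessing.Process(target=api_server_worker, args=(i,))
        for i in range(2)
    ]
    for w in workers:
        w.start()
    for w in workers:
        w.join()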

vllm/entrypoints/cli/serve.py

Lines changed: 6 additions & 5 deletions
@@ -18,7 +18,8 @@
                                     show_filtered_argument_or_group_from_help)
 from vllm.logger import init_logger
 from vllm.usage.usage_lib import UsageContext
-from vllm.utils import FlexibleArgumentParser, decorate_logs, get_tcp_uri
+from vllm.utils import (FlexibleArgumentParser, decorate_logs, get_tcp_uri,
+                        set_process_title)
 from vllm.v1.engine.core import EngineCoreProc
 from vllm.v1.engine.utils import CoreEngineProcManager, launch_core_engines
 from vllm.v1.executor.abstract import Executor
@@ -74,7 +75,7 @@ def run_headless(args: argparse.Namespace):

     if args.api_server_count > 1:
         raise ValueError("api_server_count can't be set in headless mode")
-    # set_process_title("Headless_ProcManager")
+
     # Create the EngineConfig.
     engine_args = vllm.AsyncEngineArgs.from_cli_args(args)
     usage_context = UsageContext.OPENAI_API_SERVER
@@ -139,8 +140,6 @@ def run_multi_api_server(args: argparse.Namespace):

     orig_disable_mm_preprocessor_cache = args.disable_mm_preprocessor_cache

-    # set_process_title("ProcManager")
-
     if num_api_servers > 1:
         setup_multiprocess_prometheus()

@@ -225,7 +224,9 @@ def run_api_server_worker_proc(listen_address,
                                **uvicorn_kwargs) -> None:
     """Entrypoint for individual API server worker processes."""

-    # Add process-specific prefix to stdout and stderr.
+    # Set process title and add process-specific prefix to stdout and stderr.
+    server_index = client_config.get("client_index", 0) if client_config else 0
+    set_process_title("APIServer", str(server_index))
     decorate_logs()

     uvloop.run(
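
run_api_server_worker_proc now owns both the worker's title and its log prefix, deriving the title suffix from the optional client_config dict. A standalone sketch of that guarded lookup, using a hypothetical worker_title helper that is not part of vLLM:

from typing import Any, Optional


def worker_title(client_config: Optional[dict[str, Any]]) -> tuple[str, str]:
    # client_config may be None, e.g. for a lone API server with no
    # coordinating front end, so fall back to index 0 in that case.
    server_index = client_config.get("client_index", 0) if client_config else 0
    return "APIServer", str(server_index)


assert worker_title(None) == ("APIServer", "0")
assert worker_title({"client_index": 3}) == ("APIServer", "3")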

vllm/entrypoints/openai/api_server.py

Lines changed: 2 additions & 2 deletions
@@ -102,7 +102,7 @@
 from vllm.usage.usage_lib import UsageContext
 from vllm.utils import (Device, FlexibleArgumentParser, decorate_logs,
                         get_open_zmq_ipc_path, is_valid_ipv6_address,
-                        set_process_title, set_ulimit)
+                        set_ulimit)
 from vllm.v1.metrics.prometheus import get_prometheus_registry
 from vllm.version import __version__ as VLLM_VERSION

@@ -1824,7 +1824,7 @@ async def run_server_worker(listen_address,
     ToolParserManager.import_tool_parser(args.tool_parser_plugin)

     server_index = client_config.get("client_index", 0) if client_config else 0
-    set_process_title("APIServer", str(server_index))
+
     # Load logging config for uvicorn if specified
     log_config = load_log_config(args.log_config_file)
     if log_config is not None:
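
run_server_worker also executes in the original top-level process when a single API server is launched, which is presumably why the set_process_title call here renamed the main vllm process; after this commit only the child-process entrypoint in serve.py sets a title. The helper's implementation isn't shown in this diff, but the call sites above (one- and two-argument forms) suggest a shape roughly like the following hypothetical sketch, again assuming setproctitle underneath:

import setproctitle


def set_process_title(name: str, suffix: str = "") -> None:
    # e.g. ("APIServer", "0") -> "APIServer_0"; the real helper in
    # vllm/utils may compose and prefix the title differently.
    title = f"{name}_{suffix}" if suffix else name
    setproctitle.setproctitle(title)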
