
Commit 6e78ed6

[Logs] Optimize startup logs 4 (vllm-project#29903)
Signed-off-by: yewentao256 <[email protected]>
Signed-off-by: Wentao Ye <[email protected]>
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
1 parent 7c16f3f commit 6e78ed6

5 files changed (+20 lines, -15 lines)

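This commit continues the startup-log cleanup series: one-time messages move from plain `logger.info(...)`/`logger.warning(...)` to vLLM's deduplicating `logger.info_once(...)`/`logger.warning_once(...)` helpers, mostly with `scope="local"`. As a minimal illustration of the general once-per-process logging pattern (this is not vLLM's implementation; the cache and helper below are invented for the sketch), such a helper keys a set on the message and its arguments and silently drops repeats, which is why every argument must be hashable:

```python
# Minimal sketch of a "log once" helper in the spirit of this commit.
# Illustrative only: _seen and info_once are NOT vLLM's internals.
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("demo")

_seen: set[tuple] = set()  # per-process cache of already-emitted messages

def info_once(msg: str, *args) -> None:
    """Log `msg % args` at INFO level at most once in this process."""
    key = (msg, args)  # every arg must be hashable for set membership
    if key in _seen:
        return
    _seen.add(key)
    logger.info(msg, *args)

info_once("Using %s attention backend", "FLASH_ATTN")
info_once("Using %s attention backend", "FLASH_ATTN")  # suppressed
```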

vllm/model_executor/layers/fused_moe/fused_moe.py

Lines changed: 5 additions & 6 deletions
```diff
@@ -885,12 +885,11 @@ def get_moe_configs(

     # If no optimized configuration is available, we will use the default
     # configuration
-    logger.warning(
-        (
-            "Using default MoE config. Performance might be sub-optimal! "
-            "Config file not found at %s"
-        ),
-        config_file_paths,
+    logger.warning_once(
+        "Using default MoE config. Performance might be sub-optimal! "
+        "Config file not found at %s",
+        ", ".join(config_file_paths),
+        scope="local",
     )
     return None
```
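One detail worth noting, as an inference from the diff rather than anything stated in the commit message: joining `config_file_paths` renders the searched paths as a readable comma-separated string instead of a raw Python list repr, and it also turns the argument into a hashable string, which a once-cache keyed on the message and its arguments would require. A toy comparison:

```python
# Illustrative only; these paths are hypothetical.
paths = ["/cfg/E=8,N=14336.json", "/tmp/E=8,N=14336.json"]
print("Config file not found at %s" % paths)             # list repr: ['/cfg/...', ...]
print("Config file not found at %s" % ", ".join(paths))  # /cfg/..., /tmp/...
```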

vllm/model_executor/layers/fused_moe/layer.py

Lines changed: 3 additions & 1 deletion
```diff
@@ -369,7 +369,9 @@ def __init__(
         # aux_stream() returns None on non-cuda-alike platforms.
         self.shared_experts_stream = aux_stream()
         if self.shared_experts_stream is not None:
-            logger.info_once("Enabled separate cuda stream for MoE shared_experts")
+            logger.info_once(
+                "Enabled separate cuda stream for MoE shared_experts", scope="local"
+            )

         if params_dtype is None:
             params_dtype = torch.get_default_dtype()
```

vllm/platforms/cuda.py

Lines changed: 3 additions & 2 deletions
```diff
@@ -409,10 +409,11 @@ def get_attn_backend_cls(
         )
         selected_index = sorted_indices[0]
         selected_backend = valid_backends_priorities[selected_index][0]
-        logger.info(
+        logger.info_once(
             "Using %s attention backend out of potential backends: %s",
             selected_backend.name,
-            [b[0].name for b in valid_backends_priorities],
+            tuple(b[0].name for b in valid_backends_priorities),
+            scope="local",
         )

         return selected_backend.get_path()
```
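The switch from a list comprehension to `tuple(...)` here is presumably motivated by the deduplication cache: a once-logger that keys on the message and its arguments needs every argument to be hashable, and tuples are hashable while lists are not. (This is an inference from the diff, not something the commit states.)

```python
# Hashability is the likely reason for list -> tuple (inference, not stated).
backends = ["FLASH_ATTN", "TRITON_ATTN"]
try:
    hash(("Using %s out of %s", ("FLASH_ATTN", backends)))
except TypeError as err:
    print(err)  # unhashable type: 'list'

hash(("Using %s out of %s", ("FLASH_ATTN", tuple(backends))))  # works
```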

vllm/profiler/wrapper.py

Lines changed: 8 additions & 5 deletions
```diff
@@ -61,7 +61,7 @@ def _call_stop(self) -> None:
         """Call _stop with error handling but no safeguards."""
         try:
             self._stop()
-            logger.info("Profiler stopped successfully.")
+            logger.info_once("Profiler stopped successfully.", scope="local")
         except Exception as e:
             logger.warning("Failed to stop profiler: %s", e)
         self._running = False  # Always mark as not running, assume stop worked
@@ -91,7 +91,7 @@ def step(self) -> None:
             and self._delay_iters > 0
             and self._active_iteration_count == self._delay_iters
         ):
-            logger.info("Starting profiler after delay...")
+            logger.info_once("Starting profiler after delay...", scope="local")
             self._call_start()

         if self._running:
@@ -105,7 +105,9 @@ def step(self) -> None:
             # Automatically stop the profiler after max iters
             # will be marked as not running, but leave as active so that stop
             # can clean up properly
-            logger.info("Max profiling iterations reached. Stopping profiler...")
+            logger.info_once(
+                "Max profiling iterations reached. Stopping profiler...", scope="local"
+            )
             self._call_stop()
             return

@@ -125,7 +127,7 @@ def stop(self) -> None:

     def shutdown(self) -> None:
         """Ensure profiler is stopped when shutting down."""
-        logger.info_once("Shutting down profiler")
+        logger.info_once("Shutting down profiler", scope="local")
         if self._running:
             self.stop()

@@ -156,9 +158,10 @@ def __init__(
         self.profiler_config = profiler_config
         torch_profiler_trace_dir = profiler_config.torch_profiler_dir
         if local_rank in (None, 0):
-            logger.info(
+            logger.info_once(
                 "Torch profiling enabled. Traces will be saved to: %s",
                 torch_profiler_trace_dir,
+                scope="local",
             )
             logger.debug(
                 "Profiler config: record_shapes=%s,"
```

vllm/v1/executor/multiproc_executor.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -706,7 +706,7 @@ def monitor_parent_death():
             death_pipe.recv()
         except EOFError:
             # Parent process has exited, terminate this worker
-            logger.info("Parent process exited, terminating worker")
+            logger.info_once("Parent process exited, terminating worker")
             # Send signal to self to trigger clean shutdown
             shutdown_event.set()
         except Exception as e:
```
