Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 22 additions & 2 deletions vllm_omni/distributed/ray_utils/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -176,6 +176,26 @@ def run(self, func, *args, **kwargs):
runtime_env={"env_vars": {"PYTHONPATH": os.environ.get("PYTHONPATH", "")}, "CUDA_LAUNCH_BLOCKING": "1"},
).remote()

worker_actor.run.remote(worker_entry_fn, *args, **kwargs)
task_ref = worker_actor.run.remote(worker_entry_fn, *args, **kwargs)

return worker_actor
return worker_actor, task_ref


def is_ray_task_alive(task_ref: Any, **kwargs):
    """Report whether the Ray task behind *task_ref* is still running.

    Returns False once the task has exited for any reason (success or
    failure). Extra keyword arguments (e.g. ``timeout``) are forwarded to
    ``ray.wait``.
    """
    if not RAY_AVAILABLE:
        raise ImportError("ray is required to query ray tasks")

    # ray.wait returns (ready_refs, pending_refs); a task that has finished
    # — successfully or not — appears in ready_refs, so an empty ready list
    # means the task is still alive.
    finished, _pending = ray.wait([task_ref], **kwargs)
    return len(finished) == 0


def get_ray_task_error(task_ref: Any, **kwargs) -> Exception | None:
    """Fetch the task result solely to surface its error, if any.

    Returns the exception raised while retrieving the task (typically a
    ``RayTaskError`` when the task exited with an error), or None when the
    task completed successfully. Extra keyword arguments (e.g. ``timeout``)
    are forwarded to ``ray.get``.
    """
    if not RAY_AVAILABLE:
        raise ImportError("ray is required to query ray tasks")

    try:
        # We discard the result; only the exception (or its absence) matters.
        ray.get(task_ref, **kwargs)
    except Exception as exc:
        return exc
    return None
27 changes: 21 additions & 6 deletions vllm_omni/entrypoints/omni_stage.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,12 @@
from vllm_omni.distributed.omni_connectors import build_stage_connectors
from vllm_omni.distributed.omni_connectors.adapter import try_recv_via_connector
from vllm_omni.distributed.omni_connectors.connectors.base import OmniConnectorBase
from vllm_omni.distributed.ray_utils.utils import kill_ray_actor, start_ray_actor
from vllm_omni.distributed.ray_utils.utils import (
get_ray_task_error,
is_ray_task_alive,
kill_ray_actor,
start_ray_actor,
)
from vllm_omni.engine.arg_utils import AsyncOmniEngineArgs, OmniEngineArgs
from vllm_omni.entrypoints.async_omni_diffusion import AsyncOmniDiffusion
from vllm_omni.entrypoints.async_omni_llm import AsyncOmniLLM
Expand Down Expand Up @@ -301,6 +306,8 @@ def __init__(self, stage_config: Any, stage_init_timeout: int = 300):
self._in_q: mp.queues.Queue | ZmqQueue | str | None = None
self._out_q: mp.queues.Queue | ZmqQueue | str | None = None
self._proc: mp.Process | None = None
self._ray_actor: Any | None = None
self._ray_task_ref: Any | None = None
self._shm_threshold_bytes: int = 65536
self._stage_init_timeout: int = stage_init_timeout

Expand Down Expand Up @@ -472,7 +479,7 @@ def init_stage_worker(
os.environ["VLLM_LOGGING_PREFIX"] = new_env
if worker_backend == "ray":
if is_async:
self._ray_actor = start_ray_actor(
self._ray_actor, self._ray_task_ref = start_ray_actor(
_stage_worker_async_entry,
ray_placement_group,
self.stage_id,
Expand All @@ -484,7 +491,7 @@ def init_stage_worker(
stage_init_timeout=self._stage_init_timeout,
)
else:
self._ray_actor = start_ray_actor(
self._ray_actor, self._ray_task_ref = start_ray_actor(
_stage_worker,
ray_placement_group,
self.stage_id,
Expand Down Expand Up @@ -547,9 +554,10 @@ def stop_stage_worker(self) -> None:
if callable(close_fn):
close_fn()

if hasattr(self, "_ray_actor") and self._ray_actor:
if self._ray_actor is not None:
kill_ray_actor(self._ray_actor)
self._ray_actor = None
self._ray_task_ref = None
elif self._proc is not None:
try:
self._proc.join(timeout=5)
Expand Down Expand Up @@ -611,8 +619,15 @@ def try_collect(self) -> dict[str, Any] | None:
assert self._out_q is not None
try:
return self._out_q.get_nowait()
except Exception:
return None
except queue.Empty:
pass
except Exception as e:
logger.error("Unexpected error when collecting OmniStage output queue:", exc_info=e)
self.stop_stage_worker()
raise
if self._ray_task_ref is not None and not is_ray_task_alive(self._ray_task_ref, timeout=0):
e = get_ray_task_error(self._ray_task_ref, timeout=0)
raise RuntimeError("OmniStage Ray actor died unexpectedly") from e

def process_engine_inputs(
self, stage_list: list[Any], prompt: OmniTokensPrompt | TextPrompt = None
Expand Down