| 1 | +# Copyright (c) Meta Platforms, Inc. and affiliates. |
| 2 | +# |
| 3 | +# This source code is licensed under the MIT license found in the |
| 4 | +# LICENSE file in the root directory of this source tree. |
| 5 | +from __future__ import annotations |
| 6 | + |
| 7 | +import queue |
| 8 | +import threading |
| 9 | +import time |
| 10 | +from typing import Any |
| 11 | + |
| 12 | +from tensordict.base import TensorDictBase |
| 13 | + |
| 14 | +from torchrl.modules.inference_server._transport import InferenceTransport |
| 15 | + |
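| | +# Sentinel marking "result not yet retrieved" on _MonarchFuture instances.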
| 16 | +_SENTINEL = object() |
| 17 | + |
| 18 | + |
| 19 | +class _MonarchFuture: |
| 20 | + """Future-like object for Monarch transport results. |
| 21 | +
| 22 | + Args: |
| 23 | + client: the :class:`_MonarchInferenceClient` that created this future. |
| 24 | + req_id: the unique request identifier within that client. |
| 25 | + """ |
| 26 | + |
| 27 | + def __init__(self, client: _MonarchInferenceClient, req_id: int): |
| 28 | + self._client = client |
| 29 | + self._req_id = req_id |
| 30 | + self._result: Any = _SENTINEL |
| 31 | + |
| 32 | + def result(self, timeout: float | None = None) -> TensorDictBase: |
| 33 | + """Block until the result is available.""" |
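| | +        # Fetch from the client only once and cache the value; a server-side
| | +        # exception is re-raised here rather than stored.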
| 34 | + if self._result is _SENTINEL: |
| 35 | + item = self._client._get_result(self._req_id, timeout=timeout) |
| 36 | + if isinstance(item, BaseException): |
| 37 | + raise item |
| 38 | + self._result = item |
| 39 | + return self._result |
| 40 | + |
| 41 | + |
| 42 | +class _MonarchInferenceClient: |
| 43 | + """Actor-side client for :class:`MonarchTransport`. |
| 44 | +
| 45 | + Each client owns a dedicated response queue and routes results by |
| 46 | + request-id. |
| 47 | +
| 48 | + Args: |
| 49 | + request_queue: the shared Monarch queue for requests. |
| 50 | + response_queue: this client's dedicated response queue. |
| 51 | + actor_id: the unique identifier assigned by the transport. |
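| | +
| | +    A minimal usage sketch from inside an actor, assuming ``client`` was
| | +    obtained from :meth:`MonarchTransport.client`, ``td`` is the actor's
| | +    input tensordict, and a server is draining the shared request queue::
| | +
| | +        out = client(td)                # blocking round-trip
| | +        fut = client.submit(td)         # or submit asynchronously
| | +        out = fut.result(timeout=10.0)  # and collect the result later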
| 52 | + """ |
| 53 | + |
| 54 | + def __init__(self, request_queue, response_queue, actor_id: int): |
| 55 | + self._request_queue = request_queue |
| 56 | + self._response_queue = response_queue |
| 57 | + self._actor_id = actor_id |
| 58 | + self._next_req_id = 0 |
| 59 | + self._buffered: dict[int, Any] = {} |
| 60 | + |
| 61 | + def __call__(self, td: TensorDictBase) -> TensorDictBase: |
| 62 | + """Submit a request and block until the result is ready.""" |
| 63 | + return self.submit(td).result() |
| 64 | + |
| 65 | + def submit(self, td: TensorDictBase) -> _MonarchFuture: |
| 66 | + """Submit a request and return a :class:`_MonarchFuture`.""" |
| 67 | + req_id = self._next_req_id |
| 68 | + self._next_req_id += 1 |
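| | +        # Tag the request with (actor_id, req_id) so the server can route the
| | +        # response back through resolve().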
| 69 | + self._request_queue.put((self._actor_id, req_id, td)) |
| 70 | + return _MonarchFuture(self, req_id) |
| 71 | + |
| 72 | + # -- internal ------------------------------------------------------------- |
| 73 | + |
| 74 | + def _get_result(self, req_id: int, timeout: float | None = None) -> Any: |
| 75 | + """Return the result for *req_id*, buffering any earlier arrivals.""" |
| 76 | + if req_id in self._buffered: |
| 77 | + return self._buffered.pop(req_id) |
| 78 | + deadline = None if timeout is None else time.monotonic() + timeout |
| 79 | + while True: |
| 80 | + remaining = None |
| 81 | + if deadline is not None: |
| 82 | + remaining = deadline - time.monotonic() |
| 83 | + if remaining <= 0: |
| 84 | + raise queue.Empty(f"Timeout waiting for result of request {req_id}") |
| 85 | + try: |
| 86 | + rid, result = self._response_queue.get(timeout=remaining) |
| 87 | +            except Exception as exc:
| 88 | +                raise queue.Empty(
| | +                    f"Timeout waiting for result of request {req_id}"
| | +                ) from exc
| 89 | + if rid == req_id: |
| 90 | + return result |
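| | +            # A response for another outstanding request arrived first;
| | +            # buffer it for its own future.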
| 91 | + self._buffered[rid] = result |
| 92 | + |
| 93 | + |
| 94 | +class MonarchTransport(InferenceTransport): |
| 95 | + """Transport using Monarch for distributed inference on GPU clusters. |
| 96 | +
| 97 | + Uses Monarch's actor model and RDMA-capable channels for efficient |
| 98 | + cross-node communication. Monarch is imported lazily at instantiation |
| 99 | + time; importing the class itself does not require Monarch. |
| 100 | +
| 101 | + .. note:: |
| 102 | + This transport requires ``monarch`` to be installed. It is designed |
| 103 | + for large-scale GPU clusters where Monarch is the preferred |
| 104 | + communication layer. |
| 105 | +
| 106 | + Keyword Args: |
| 107 | + max_queue_size (int): maximum size of the request queue. |
| 108 | + Default: ``1000``. |
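| | +
| | +    Examples:
| | +        A minimal sketch of one server iteration, assuming ``model`` is a
| | +        callable that maps a request tensordict to a result tensordict::
| | +
| | +            transport = MonarchTransport()
| | +            client = transport.client()  # pass this to a Monarch actor
| | +            ...
| | +            transport.wait_for_work(timeout=1.0)
| | +            items, callbacks = transport.drain(max_items=32)
| | +            for td, cb in zip(items, callbacks):
| | +                transport.resolve(cb, model(td))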
| 109 | + """ |
| 110 | + |
| 111 | + def __init__(self, *, max_queue_size: int = 1000): |
| 112 | + try: |
| 113 | + import monarch # noqa: F401 |
| 114 | + from monarch.tools.queue import MonarchQueue |
| 115 | +        except ImportError as err:
| 116 | +            raise ImportError(
| 117 | +                "Monarch is required for MonarchTransport. "
| 118 | +                "Install it following the Monarch documentation."
| 119 | +            ) from err
| 120 | + self._request_queue = MonarchQueue(maxsize=max_queue_size) |
| 121 | + self._response_queues: dict[int, Any] = {} |
| 122 | + self._lock = threading.Lock() |
| 123 | + self._next_actor_id = 0 |
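| | +        # Keep the queue class around so client() can build per-actor response
| | +        # queues later.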
| 124 | + self._MonarchQueue = MonarchQueue |
| 125 | + |
| 126 | + # -- actor API ------------------------------------------------------------ |
| 127 | + |
| 128 | + def client(self) -> _MonarchInferenceClient: |
| 129 | + """Create an actor-side client with a dedicated response queue. |
| 130 | +
| 131 | + Returns: |
| 132 | + A :class:`_MonarchInferenceClient` that can be passed to a Monarch |
| 133 | + actor. |
| 134 | + """ |
| 135 | + with self._lock: |
| 136 | + actor_id = self._next_actor_id |
| 137 | + self._next_actor_id += 1 |
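| | +            # Each actor gets its own bounded response queue so responses never
| | +            # cross actors.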
| 138 | + response_queue = self._MonarchQueue(maxsize=1000) |
| 139 | + self._response_queues[actor_id] = response_queue |
| 140 | + return _MonarchInferenceClient(self._request_queue, response_queue, actor_id) |
| 141 | + |
| 142 | + def submit(self, td: TensorDictBase): |
| 143 | +        """Not supported -- use :meth:`client` to obtain a per-actor client instead."""
| 144 | + raise RuntimeError( |
| 145 | + "MonarchTransport.submit() is not supported. " |
| 146 | + "Call transport.client() to create a _MonarchInferenceClient." |
| 147 | + ) |
| 148 | + |
| 149 | + # -- server API ----------------------------------------------------------- |
| 150 | + |
| 151 | + def drain( |
| 152 | + self, max_items: int |
| 153 | + ) -> tuple[list[TensorDictBase], list[tuple[int, int]]]: |
| 154 | + """Dequeue up to *max_items* pending requests (non-blocking).""" |
| 155 | + items: list[TensorDictBase] = [] |
| 156 | + callbacks: list[tuple[int, int]] = [] |
| 157 | + for _ in range(max_items): |
| 158 | + try: |
| 159 | + actor_id, req_id, td = self._request_queue.get(block=False) |
| 160 | + items.append(td) |
| 161 | + callbacks.append((actor_id, req_id)) |
| 162 | + except Exception: |
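| | +                # Queue drained (or the get failed); return whatever was
| | +                # collected so far.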
| 163 | + break |
| 164 | + return items, callbacks |
| 165 | + |
| 166 | + def wait_for_work(self, timeout: float) -> None: |
| 167 | + """Block until at least one request is available or *timeout* elapses.""" |
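| | +        # Emulate a blocking peek: take one request and immediately put it back;
| | +        # timeouts and queue errors are swallowed so callers simply re-check for work.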
| 168 | + try: |
| 169 | + item = self._request_queue.get(timeout=timeout) |
| 170 | + self._request_queue.put(item) |
| 171 | + except Exception: |
| 172 | + pass |
| 173 | + |
| 174 | + def resolve(self, callback: tuple[int, int], result: TensorDictBase) -> None: |
| 175 | + """Route the result to the correct actor's response queue.""" |
| 176 | + actor_id, req_id = callback |
| 177 | + self._response_queues[actor_id].put((req_id, result)) |
| 178 | + |
| 179 | + def resolve_exception(self, callback: tuple[int, int], exc: BaseException) -> None: |
| 180 | + """Route an exception to the correct actor's response queue.""" |
| 181 | + actor_id, req_id = callback |
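| | +        # The exception travels as a payload; _MonarchFuture.result() re-raises it
| | +        # on the actor side.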
| 182 | + self._response_queues[actor_id].put((req_id, exc)) |